X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=a471ca3f3675fcab67677381164c7dc69dd8903a;hb=1da1ffa04b7e7b5bb7c19fb9109bfd6c62f5a612;hp=d47d534c913537dff705a3dd8815abb9fa44241b;hpb=d271d74bc71cf8a825be38734aaceb57e2d5d0a3;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d47d534c..a471ca3f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -28,13 +28,13 @@ use secp256k1; use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator}; use chain::transaction::OutPoint; use ln::channel::{Channel, ChannelError}; -use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; +use ln::features::{InitFeatures, NodeFeatures}; use ln::router::Route; -use ln::features::InitFeatures; use ln::msgs; use ln::onion_utils; use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError}; -use chain::keysinterface::{ChannelKeys, KeysInterface, InMemoryChannelKeys}; +use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys}; use util::config::UserConfig; use util::{byte_utils, events}; use util::ser::{Readable, ReadableArgs, Writeable, Writer}; @@ -152,7 +152,7 @@ pub struct PaymentHash(pub [u8;32]); #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] pub struct PaymentPreimage(pub [u8;32]); -type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>); +type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>); /// Error type returned across the channel_state mutex boundary. When an Err is generated for a /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel @@ -210,7 +210,7 @@ impl MsgHandleErrInternal { } } #[inline] - fn from_chan_no_close<ChanSigner: ChannelKeys>(err: ChannelError<ChanSigner>, channel_id: [u8; 32]) -> Self { + fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { Self { err: match err { ChannelError::Ignore(msg) => LightningError { @@ -292,16 +292,20 @@ const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assum /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static /// lifetimes). Other times you can afford a reference, which is more efficient, in which case /// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents -/// issues such as overly long function definitions. -pub type SimpleArcChannelManager<M, T> = Arc<ChannelManager<InMemoryChannelKeys, Arc<M>, Arc<T>>>; +/// issues such as overly long function definitions. Note that the ChannelManager can take any +/// type that implements KeysInterface for its keys manager, but this type alias chooses the +/// concrete type of the KeysManager. +pub type SimpleArcChannelManager<M, T, F> = Arc<ChannelManager<InMemoryChannelKeys, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>>>; /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference /// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't /// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as /// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes). /// But if this is not necessary, using a reference is more efficient. 
Defining these type aliases -/// helps with issues such as long function definitions. -pub type SimpleRefChannelManager<'a, 'b, M, T> = ChannelManager<InMemoryChannelKeys, &'a M, &'b T>; +/// helps with issues such as long function definitions. Note that the ChannelManager can take any +/// type that implements KeysInterface for its keys manager, but this type alias chooses the +/// concrete type of the KeysManager. +pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, M, T, F> = ChannelManager<InMemoryChannelKeys, &'a M, &'b T, &'c KeysManager, &'d F>; /// Manager which keeps track of a number of channels and sends messages to the appropriate /// channel, also tracking HTLC preimages and forwarding onion packets appropriately. @@ -316,7 +320,7 @@ pub type SimpleRefChannelManager<'a, 'b, M, T> = ChannelManager -pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref> +pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> where M::Target: ManyChannelMonitor<ChanSigner>, T::Target: BroadcasterInterface, + K::Target: KeysInterface<ChanKeySigner = ChanSigner>, + F::Target: FeeEstimator, { default_configuration: UserConfig, genesis_hash: Sha256dHash, - fee_estimator: Arc<FeeEstimator>, + fee_estimator: F, monitor: M, tx_broadcaster: T, @@ -362,6 +368,10 @@ pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref> channel_state: Mutex<ChannelHolder<ChanSigner>>, our_network_key: SecretKey, + /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this + /// value increases strictly since we don't assume access to a time source. + last_node_announcement_serial: AtomicUsize, + /// The bulk of our storage will eventually be here (channels and message queues and the like). /// If we are connected to a peer we always at least have an entry here, even if no channels /// are currently open with that peer. @@ -376,7 +386,7 @@ pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref> /// Taken first everywhere where we are making changes before any other locks. total_consistency_lock: RwLock<()>, - keys_manager: Arc<KeysInterface<ChanKeySigner = ChanSigner>>, + keys_manager: K, logger: Arc<Logger>, } @@ -457,21 +467,41 @@ pub struct ChannelDetails { } macro_rules! handle_error { - ($self: ident, $internal: expr, $their_node_id: expr, $locked_channel_state: expr) => { + ($self: ident, $internal: expr, $their_node_id: expr) => { match $internal { Ok(msg) => Ok(msg), Err(MsgHandleErrInternal { err, shutdown_finish }) => { + #[cfg(debug_assertions)] + { + // In testing, ensure there are no deadlocks where the lock is already held upon + // entering the macro. + assert!($self.channel_state.try_lock().is_ok()); + } + + let mut msg_events = Vec::with_capacity(2); + if let Some((shutdown_res, update_option)) = shutdown_finish { $self.finish_force_close_channel(shutdown_res); if let Some(update) = update_option { - $locked_channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } } + log_error!($self, "{}", err.err); if let msgs::ErrorAction::IgnoreError = err.action { - } else { $locked_channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: $their_node_id, action: err.action.clone() }); } + } else { + msg_events.push(events::MessageSendEvent::HandleError { + node_id: $their_node_id, + action: err.action.clone() + }); + } + + if !msg_events.is_empty() { + $self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events); + } + // Return the error in case a higher-level API needs one Err(err) }, @@ -484,7 +514,7 @@ macro_rules! 
break_chan_entry { match $res { Ok(res) => res, Err(ChannelError::Ignore(msg)) => { - break Err(MsgHandleErrInternal::from_chan_no_close::<ChanSigner>(ChannelError::Ignore(msg), $entry.key().clone())) + break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) }, Err(ChannelError::Close(msg)) => { log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg); @@ -492,8 +522,7 @@ macro_rules! break_chan_entry { if let Some(short_id) = chan.get_short_channel_id() { $channel_state.short_to_id.remove(&short_id); } - break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) - }, + break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok())) }, Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } } } @@ -504,7 +533,7 @@ macro_rules! try_chan_entry { match $res { Ok(res) => res, Err(ChannelError::Ignore(msg)) => { - return Err(MsgHandleErrInternal::from_chan_no_close::<ChanSigner>(ChannelError::Ignore(msg), $entry.key().clone())) + return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) }, Err(ChannelError::Close(msg)) => { log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg); @@ -512,7 +541,7 @@ macro_rules! try_chan_entry { if let Some(short_id) = chan.get_short_channel_id() { $channel_state.short_to_id.remove(&short_id); } - return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok())) }, Err(ChannelError::CloseDelayBroadcast { msg, update }) => { log_error!($self, "Channel {} needs to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg); @@ -520,23 +549,17 @@ macro_rules! try_chan_entry { if let Some(short_id) = chan.get_short_channel_id() { $channel_state.short_to_id.remove(&short_id); } - if let Some(update) = update { - if let Err(e) = $self.monitor.add_update_monitor(update.get_funding_txo().unwrap(), update.clone()) { - match e { - // Upstream channel is dead, but we at least want to fail back HTLCs to save - // downstream channels. In case of PermanentFailure, we are not going to be able - // to claim back the to_remote output on the remote commitment transaction. Doesn't - // make a difference here; we are concerned about the HTLC circuit, not on-chain funds. - ChannelMonitorUpdateErr::PermanentFailure => {}, - ChannelMonitorUpdateErr::TemporaryFailure => {}, - } + if let Err(e) = $self.monitor.update_monitor(chan.get_funding_txo().unwrap(), update) { + match e { + // Upstream channel is dead, but we at least want to fail back HTLCs to save + // downstream channels. In case of PermanentFailure, we are not going to be able + // to claim back the to_remote output on the remote commitment transaction. Doesn't + // make a difference here; we are concerned about the HTLC circuit, not on-chain funds. 
+ ChannelMonitorUpdateErr::PermanentFailure => {}, + ChannelMonitorUpdateErr::TemporaryFailure => {}, } } - let mut shutdown_res = chan.force_shutdown(); - if shutdown_res.0.len() >= 1 { - log_error!($self, "You have a toxic local commitment transaction {} available in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take", shutdown_res.0[0].txid()); - } - shutdown_res.0.clear(); + let shutdown_res = chan.force_shutdown(false); return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok())) } } @@ -564,7 +587,7 @@ macro_rules! handle_monitor_err { // splitting hairs we'd prefer to claim payments that were to us, but we haven't // given up the preimage yet, so might as well just wait until the payment is // retried, avoiding the on-chain fees. - let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())); + let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok())); res }, ChannelMonitorUpdateErr::TemporaryFailure => { @@ -587,7 +610,7 @@ macro_rules! handle_monitor_err { debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment); } $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails); - Err(MsgHandleErrInternal::from_chan_no_close::<ChanSigner>(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key())) + Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key())) }, } } @@ -614,9 +637,11 @@ macro_rules! maybe_break_monitor_err { } } -impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, T> +impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelManager<ChanSigner, M, T, K, F> where M::Target: ManyChannelMonitor<ChanSigner>, T::Target: BroadcasterInterface, + K::Target: KeysInterface<ChanKeySigner = ChanSigner>, + F::Target: FeeEstimator, { /// Constructs a new ChannelManager to hold several channels and route between them. /// @@ -636,13 +661,13 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, - pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: M, tx_broadcaster: T, logger: Arc<Logger>,keys_manager: Arc<KeysInterface<ChanKeySigner = ChanSigner>>, config: UserConfig, current_blockchain_height: usize) -> Result<ChannelManager<ChanSigner, M, T>, secp256k1::Error> { + pub fn new(network: Network, fee_est: F, monitor: M, tx_broadcaster: T, logger: Arc<Logger>, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Result<ChannelManager<ChanSigner, M, T, K, F>, secp256k1::Error> { let secp_ctx = Secp256k1::new(); let res = ChannelManager { default_configuration: config.clone(), genesis_hash: genesis_block(network).header.bitcoin_hash(), - fee_estimator: feeest.clone(), + fee_estimator: fee_est, monitor, tx_broadcaster, @@ -659,6 +684,8 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, + last_node_announcement_serial: AtomicUsize::new(0), + @@ -683,13 +710,14 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000. 
- pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> { + pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> { if channel_value_satoshis < 1000 { return Err(APIError::APIMisuseError { err: "channel_value must be at least 1000 satoshis" }); } - let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, Arc::clone(&self.logger), &self.default_configuration)?; - let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator); + let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; + let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, Arc::clone(&self.logger), config)?; + let res = channel.get_open_channel(self.genesis_hash.clone(), &self.fee_estimator); let _ = self.total_consistency_lock.read().unwrap(); let mut channel_state = self.channel_state.lock().unwrap(); @@ -711,7 +739,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, - fn list_channels_with_filter<F: FnMut(&(&[u8; 32], &Channel<ChanSigner>)) -> bool>(&self, f: F) -> Vec<ChannelDetails> { + fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<ChanSigner>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> { let mut res = Vec::new(); { let channel_state = self.channel_state.lock().unwrap(); @@ -807,14 +835,17 @@ impl ChannelManager ChannelManager ChannelManager ChannelManager ChannelManager let err: Result<(), APIError> = loop { - + let mut channel_lock = self.channel_state.lock().unwrap(); let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}), Some(id) => id.clone(), @@ -1203,8 +1233,8 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, - Some((update_add, commitment_signed, chan_monitor)) => { - if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + Some((update_add, commitment_signed, monitor_update)) => { + if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true); // Note that MonitorUpdateFailed here indicates (per function docs) // that we will resend the commitment update once we unfreeze the monitor @@ -1231,7 +1261,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, Ok(_) => unreachable!(), Err(e) => { Err(APIError::ChannelUnavailable { err: e.err }) } } @@ -1250,18 +1280,17 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, Some(mut chan) => { (chan.get_outbound_funding_created(funding_txo) .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None) + MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None) } else { unreachable!(); }) , chan) }, None => return }; - match handle_error!(self, res, chan.get_their_node_id(), channel_state) { + match handle_error!(self, res, chan.get_their_node_id()) { Ok(funding_msg) => { (chan, funding_msg.0, funding_msg.1) }, @@ -1269,16 +1298,13 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, ChannelMonitorUpdateErr::PermanentFailure => { - { - let mut channel_state = self.channel_state.lock().unwrap(); - match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None)), chan.get_their_node_id(), channel_state) { - Err(_) => { return; }, - Ok(()) => unreachable!(), - } + match handle_error!(self, 
Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(true), None)), chan.get_their_node_id()) { + Err(_) => { return; }, + Ok(()) => unreachable!(), } }, ChannelMonitorUpdateErr::TemporaryFailure => { @@ -1327,6 +1353,57 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, + /// Generates a signed node_announcement from the given arguments and creates a + /// BroadcastNodeAnnouncement event. + pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<msgs::NetAddress>) { + let _ = self.total_consistency_lock.read().unwrap(); + + if addresses.len() > 500 { + panic!("More than half the message size was taken up by public addresses!"); + } + + let announcement = msgs::UnsignedNodeAnnouncement { + features: NodeFeatures::supported(), + timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32, + node_id: self.get_our_node_id(), + rgb, alias, addresses, + excess_address_data: Vec::new(), + excess_data: Vec::new(), + }; + let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]); + + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement { + msg: msgs::NodeAnnouncement { + signature: self.secp_ctx.sign(&msghash, &self.our_network_key), + contents: announcement + }, + }); + } + /// Processes HTLCs which are pending, waiting on a random forward delay. /// /// Should only really ever be called in response to a PendingHTLCsForwardable event. @@ -1438,7 +1515,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, Ok(res) => res, Err(e) => { // We surely failed send_commitment due to bad keys, in that case @@ -1454,17 +1531,15 @@ impl ChannelManager { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } }; - match handle_error!(self, err, their_node_id, channel_state) { - Ok(_) => unreachable!(), - Err(_) => { continue; }, - } + handle_errors.push((their_node_id, err)); + continue; } }; - if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); continue; } @@ -1517,11 +1592,8 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref> ChannelManager<ChanSigner, M, - if handle_errors.len() > 0 { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - for (their_node_id, err) in handle_errors.drain(..) { - let _ = handle_error!(self, err, their_node_id, channel_state_lock); - } + for (their_node_id, err) in handle_errors.drain(..) { + let _ = handle_error!(self, err, their_node_id); } if new_events.is_empty() { return } @@ -1737,8 +1809,8 @@ impl ChannelManager { - if let Some(chan_monitor) = monitor_option { - if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + if let Some(monitor_update) = monitor_option { + if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { if was_frozen_for_monitor { assert!(msgs.is_none()); } else { @@ -1773,7 +1845,8 @@ impl ChannelManager ChannelManager { - // TODO: There may be some pending HTLCs that we intended to fail - // backwards when a monitor update failed. We should make sure - // knowledge of those gets moved into the appropriate in-memory - // ChannelMonitor and they get failed backwards once we get - // on-chain confirmations. - // Note I think #198 addresses this, so once it's merged a test - // should be written. 
- if let Some(short_id) = channel.get_short_channel_id() { - short_to_id.remove(&short_id); - } - close_results.push(channel.force_shutdown()); - if let Ok(update) = self.get_channel_update(&channel) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - false - }, - ChannelMonitorUpdateErr::TemporaryFailure => true, - } - } else { - let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(); - if !pending_forwards.is_empty() { - htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards)); - } - htlc_failures.append(&mut pending_failures); + let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) { + Some(chan) => chan, + None => return, + }; + if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id { + return; + } - macro_rules! handle_cs { () => { - if let Some(update) = commitment_update { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: channel.get_their_node_id(), - updates: update, - }); - } - } } - macro_rules! handle_raa { () => { - if let Some(revoke_and_ack) = raa { - pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { - node_id: channel.get_their_node_id(), - msg: revoke_and_ack, - }); - } - } } - match order { - RAACommitmentOrder::CommitmentFirst => { - handle_cs!(); - handle_raa!(); - }, - RAACommitmentOrder::RevokeAndACKFirst => { - handle_raa!(); - handle_cs!(); - }, - } - if needs_broadcast_safe { - pending_events.push(events::Event::FundingBroadcastSafe { - funding_txo: channel.get_funding_txo().unwrap(), - user_channel_id: channel.get_user_id(), - }); - } - if let Some(msg) = funding_locked { - pending_msg_events.push(events::MessageSendEvent::SendFundingLocked { - node_id: channel.get_their_node_id(), - msg, - }); - if let Some(announcement_sigs) = self.get_announcement_sigs(channel) { - pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { - node_id: channel.get_their_node_id(), - msg: announcement_sigs, - }); - } - short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id()); - } - true - } - } else { true } - }); + let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(); + if !pending_forwards.is_empty() { + htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards)); + } + htlc_failures.append(&mut pending_failures); + + macro_rules! handle_cs { () => { + if let Some(update) = commitment_update { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: channel.get_their_node_id(), + updates: update, + }); + } + } } + macro_rules! 
handle_raa { () => { + if let Some(revoke_and_ack) = raa { + pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { + node_id: channel.get_their_node_id(), + msg: revoke_and_ack, + }); + } + } } + match order { + RAACommitmentOrder::CommitmentFirst => { + handle_cs!(); + handle_raa!(); + }, + RAACommitmentOrder::RevokeAndACKFirst => { + handle_raa!(); + handle_cs!(); + }, + } + if needs_broadcast_safe { + pending_events.push(events::Event::FundingBroadcastSafe { + funding_txo: channel.get_funding_txo().unwrap(), + user_channel_id: channel.get_user_id(), + }); + } + if let Some(msg) = funding_locked { + pending_msg_events.push(events::MessageSendEvent::SendFundingLocked { + node_id: channel.get_their_node_id(), + msg, + }); + if let Some(announcement_sigs) = self.get_announcement_sigs(channel) { + pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + node_id: channel.get_their_node_id(), + msg: announcement_sigs, + }); + } + short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id()); + } } self.pending_events.lock().unwrap().append(&mut pending_events); @@ -1897,7 +1965,7 @@ impl ChannelManager ChannelManager { // Note that we reply with the new channel_id in error messages if we gave up on the // channel, not the temporary_channel_id. This is compatible with ourselves, but the // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for // any messages referencing a previously-closed channel anyway. - return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None)); + return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(true), None)); }, ChannelMonitorUpdateErr::TemporaryFailure => { // There's no problem signing a counterparty's funding transaction if our monitor @@ -1999,8 +2067,17 @@ impl ChannelManager try_chan_entry!(self, Err(e), channel_state, chan), + Err((Some(monitor_update), e)) => { + assert!(chan.get().is_awaiting_monitor_update()); + let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update); + try_chan_entry!(self, Err(e), channel_state, chan); + unreachable!(); + }, + Ok(update) => update, + }; + if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false); } (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id()) @@ -2057,7 +2134,7 @@ impl ChannelManager ChannelManager ChannelManager = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set"); + let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set"); try_chan_entry!(self, Err(chan_err), channel_state, chan); } try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); @@ -2257,9 +2334,18 @@ impl ChannelManager try_chan_entry!(self, Err(e), channel_state, chan), + Err((Some(update), e)) => { + assert!(chan.get().is_awaiting_monitor_update()); + let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), update); + try_chan_entry!(self, Err(e), channel_state, chan); + unreachable!(); + }, + Ok(res) => res + }; + if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { 
return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); //TODO: Rebroadcast closing_signed if present on monitor update restoration } @@ -2334,9 +2420,9 @@ impl ChannelManager ChannelManager return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } @@ -2405,7 +2491,7 @@ impl ChannelManager = ChannelError::Close("Bad announcement_signatures node_signature"); + let chan_err: ChannelError = ChannelError::Close("Bad announcement_signatures node_signature"); try_chan_entry!(self, Err(chan_err), channel_state, chan); } @@ -2436,10 +2522,10 @@ impl ChannelManager ChannelManager Result<(), APIError> { let _ = self.total_consistency_lock.read().unwrap(); - let mut channel_state_lock = self.channel_state.lock().unwrap(); let their_node_id; let err: Result<(), _> = loop { + let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; match channel_state.by_id.entry(channel_id) { @@ -2522,10 +2608,10 @@ impl ChannelManager ChannelManager unreachable!(), Err(e) => { Err(APIError::APIMisuseError { err: e.err })} } } } -impl events::MessageSendEventsProvider for ChannelManager +impl events::MessageSendEventsProvider for ChannelManager where M::Target: ManyChannelMonitor, T::Target: BroadcasterInterface, + K::Target: KeysInterface, + F::Target: FeeEstimator, { fn get_and_clear_pending_msg_events(&self) -> Vec { // TODO: Event release to users and serialization is currently race-y: it's very easy for a @@ -2580,9 +2668,11 @@ impl events::MessageSendEventsProvi } } -impl events::EventsProvider for ChannelManager +impl events::EventsProvider for ChannelManager where M::Target: ManyChannelMonitor, T::Target: BroadcasterInterface, + K::Target: KeysInterface, + F::Target: FeeEstimator, { fn get_and_clear_pending_events(&self) -> Vec { // TODO: Event release to users and serialization is currently race-y: it's very easy for a @@ -2608,9 +2698,12 @@ impl events::EventsProvider for Cha } } -impl ChainListener for ChannelManager +impl + ChainListener for ChannelManager where M::Target: ManyChannelMonitor, T::Target: BroadcasterInterface, + K::Target: KeysInterface, + F::Target: FeeEstimator, { fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) { let header_hash = header.bitcoin_hash(); @@ -2657,7 +2750,7 @@ impl Ch // It looks like our counterparty went on-chain. We go ahead and // broadcast our latest local state as well here, just in case its // some kind of SPV attack, though we expect these to be dropped. - failed_channels.push(channel.force_shutdown()); + failed_channels.push(channel.force_shutdown(true)); if let Ok(update) = self.get_channel_update(&channel) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -2672,11 +2765,10 @@ impl Ch if let Some(short_id) = channel.get_short_channel_id() { short_to_id.remove(&short_id); } - failed_channels.push(channel.force_shutdown()); // If would_broadcast_at_height() is true, the channel_monitor will broadcast // the latest local tx for us, so we should skip that here (it doesn't really // hurt anything, but does make tests a bit simpler). 
- failed_channels.last_mut().unwrap().0 = Vec::new(); + failed_channels.push(channel.force_shutdown(false)); if let Ok(update) = self.get_channel_update(&channel) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -2692,6 +2784,18 @@ impl Ch } self.latest_block_height.store(height as usize, Ordering::Release); *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash; + loop { + // Update last_node_announcement_serial to be the max of its current value and the + // block timestamp. This should keep us close to the current time without relying on + // having an explicit local time source. + // Just in case we end up in a race, we loop until we either successfully update + // last_node_announcement_serial or decide we don't need to. + let old_serial = self.last_node_announcement_serial.load(Ordering::Acquire); + if old_serial >= header.time as usize { break; } + if self.last_node_announcement_serial.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() { + break; + } + } } /// We force-close the channel without letting our counterparty participate in the shutdown @@ -2708,7 +2812,7 @@ impl Ch if let Some(short_id) = v.get_short_channel_id() { short_to_id.remove(&short_id); } - failed_channels.push(v.force_shutdown()); + failed_channels.push(v.force_shutdown(true)); if let Ok(update) = self.get_channel_update(&v) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -2728,152 +2832,91 @@ impl Ch } } -impl ChannelMessageHandler for ChannelManager +impl + ChannelMessageHandler for ChannelManager where M::Target: ManyChannelMonitor, T::Target: BroadcasterInterface, + K::Target: KeysInterface, + F::Target: FeeEstimator, { fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_open_channel(their_node_id, their_features, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_open_channel(their_node_id, their_features, msg), *their_node_id); } fn handle_accept_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_accept_channel(their_node_id, their_features, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_accept_channel(their_node_id, their_features, msg), *their_node_id); } fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_funding_created(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_funding_created(their_node_id, msg), *their_node_id); } fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_funding_signed(their_node_id, msg); - if res.is_err() { - 
let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_funding_signed(their_node_id, msg), *their_node_id); } fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_funding_locked(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_funding_locked(their_node_id, msg), *their_node_id); } fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_shutdown(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_shutdown(their_node_id, msg), *their_node_id); } fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_closing_signed(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_closing_signed(their_node_id, msg), *their_node_id); } fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_update_add_htlc(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), *their_node_id); } fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_update_fulfill_htlc(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), *their_node_id); } fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_update_fail_htlc(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), *their_node_id); } fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_update_fail_malformed_htlc(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), *their_node_id); } fn 
handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_commitment_signed(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_commitment_signed(their_node_id, msg), *their_node_id); } fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_revoke_and_ack(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), *their_node_id); } fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_update_fee(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_update_fee(their_node_id, msg), *their_node_id); } fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_announcement_signatures(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), *their_node_id); } fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) { let _ = self.total_consistency_lock.read().unwrap(); - let res = self.internal_channel_reestablish(their_node_id, msg); - if res.is_err() { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let _ = handle_error!(self, res, *their_node_id, channel_state_lock); - } + let _ = handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), *their_node_id); } fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) { @@ -2893,7 +2936,7 @@ impl Ch if let Some(short_id) = chan.get_short_channel_id() { short_to_id.remove(&short_id); } - failed_channels.push(chan.force_shutdown()); + failed_channels.push(chan.force_shutdown(true)); if let Ok(update) = self.get_channel_update(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -2940,6 +2983,7 @@ impl Ch &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id, &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id, &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, + &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id, &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. 
} => true, @@ -3032,8 +3076,8 @@ impl Writeable for PendingHTLCInfo { } } -impl<R: ::std::io::Read> Readable<R> for PendingHTLCInfo { - fn read(reader: &mut R) -> Result<PendingHTLCInfo, DecodeError> { +impl Readable for PendingHTLCInfo { + fn read<R: ::std::io::Read>(reader: &mut R) -> Result<PendingHTLCInfo, DecodeError> { Ok(PendingHTLCInfo { onion_packet: Readable::read(reader)?, incoming_shared_secret: Readable::read(reader)?, @@ -3061,9 +3105,9 @@ impl Writeable for HTLCFailureMsg { } } -impl<R: ::std::io::Read> Readable<R> for HTLCFailureMsg { - fn read(reader: &mut R) -> Result<HTLCFailureMsg, DecodeError> { - match <u8 as Readable<R>>::read(reader)? { +impl Readable for HTLCFailureMsg { + fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCFailureMsg, DecodeError> { + match <u8 as Readable>::read(reader)? { 0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)), 1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)), _ => Err(DecodeError::InvalidValue), @@ -3087,9 +3131,9 @@ impl Writeable for PendingHTLCStatus { } } -impl<R: ::std::io::Read> Readable<R> for PendingHTLCStatus { - fn read(reader: &mut R) -> Result<PendingHTLCStatus, DecodeError> { - match <u8 as Readable<R>>::read(reader)? { +impl Readable for PendingHTLCStatus { + fn read<R: ::std::io::Read>(reader: &mut R) -> Result<PendingHTLCStatus, DecodeError> { + match <u8 as Readable>::read(reader)? { 0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)), 1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)), _ => Err(DecodeError::InvalidValue), @@ -3121,9 +3165,9 @@ impl Writeable for HTLCSource { } } -impl<R: ::std::io::Read> Readable<R> for HTLCSource { - fn read(reader: &mut R) -> Result<HTLCSource, DecodeError> { - match <u8 as Readable<R>>::read(reader)? { +impl Readable for HTLCSource { + fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCSource, DecodeError> { + match <u8 as Readable>::read(reader)? { 0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), 1 => Ok(HTLCSource::OutboundRoute { route: Readable::read(reader)?, @@ -3152,9 +3196,9 @@ impl Writeable for HTLCFailReason { } } -impl<R: ::std::io::Read> Readable<R> for HTLCFailReason { - fn read(reader: &mut R) -> Result<HTLCFailReason, DecodeError> { - match <u8 as Readable<R>>::read(reader)? { +impl Readable for HTLCFailReason { + fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCFailReason, DecodeError> { + match <u8 as Readable>::read(reader)? { 0 => Ok(HTLCFailReason::LightningError { err: Readable::read(reader)? }), 1 => Ok(HTLCFailReason::Reason { failure_code: Readable::read(reader)?, @@ -3184,9 +3228,9 @@ impl Writeable for HTLCForwardInfo { } } -impl<R: ::std::io::Read> Readable<R> for HTLCForwardInfo { - fn read(reader: &mut R) -> Result<HTLCForwardInfo, DecodeError> { - match <u8 as Readable<R>>::read(reader)? { +impl Readable for HTLCForwardInfo { + fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCForwardInfo, DecodeError> { + match <u8 as Readable>::read(reader)? { 0 => Ok(HTLCForwardInfo::AddHTLC { prev_short_channel_id: Readable::read(reader)?, prev_htlc_id: Readable::read(reader)?, @@ -3201,9 +3245,11 @@ impl<R: ::std::io::Read> Readable<R> for HTLCForwardInfo { } } -impl<ChanSigner: ChannelKeys + Writeable, M: Deref, T: Deref> Writeable for ChannelManager<ChanSigner, M, T> +impl<ChanSigner: ChannelKeys + Writeable, M: Deref, T: Deref, K: Deref, F: Deref> Writeable for ChannelManager<ChanSigner, M, T, K, F> where M::Target: ManyChannelMonitor<ChanSigner>, T::Target: BroadcasterInterface, + K::Target: KeysInterface<ChanKeySigner = ChanSigner>, + F::Target: FeeEstimator, { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> { let _ = self.total_consistency_lock.write().unwrap(); @@ -3256,6 +3302,8 @@ impl Writeable for Chan peer_state.latest_features.write(writer)?; } + (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?; + Ok(()) } } @@ -3275,19 +3323,21 @@ impl Writeable for Chan /// 5) Move the ChannelMonitors into your local ManyChannelMonitor. /// 6) Disconnect/connect blocks on the ChannelManager. /// 7) Register the new ChannelManager with your ChainWatchInterface. -pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref> +pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> where M::Target: ManyChannelMonitor<ChanSigner>, T::Target: BroadcasterInterface, + K::Target: KeysInterface<ChanKeySigner = ChanSigner>, + F::Target: FeeEstimator, { /// The keys provider which will give us relevant keys. 
Some keys will be loaded during /// deserialization. - pub keys_manager: Arc<KeysInterface<ChanKeySigner = ChanSigner>>, + pub keys_manager: K, /// The fee_estimator for use in the ChannelManager in the future. /// /// No calls to the FeeEstimator will be made during deserialization. - pub fee_estimator: Arc<FeeEstimator>, + pub fee_estimator: F, /// The ManyChannelMonitor for use in the ChannelManager in the future. /// /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that /// @@ -3319,11 +3369,29 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: pub channel_monitors: &'a mut HashMap<OutPoint, &'a mut ChannelMonitor<ChanSigner>>, } -impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>, M: Deref, T: Deref> ReadableArgs<R, ChannelManagerReadArgs<'a, ChanSigner, M, T>> for (Sha256dHash, ChannelManager<ChanSigner, M, T>) +// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the +// SimpleArcChannelManager type: +impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: Deref> + ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F>> for (Sha256dHash, Arc<ChannelManager<ChanSigner, M, T, K, F>>) where M::Target: ManyChannelMonitor<ChanSigner>, T::Target: BroadcasterInterface, + K::Target: KeysInterface<ChanKeySigner = ChanSigner>, + F::Target: FeeEstimator, { - fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M, T>) -> Result<Self, DecodeError> { + fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F>) -> Result<Self, DecodeError> { + let (blockhash, chan_manager) = <(Sha256dHash, ChannelManager<ChanSigner, M, T, K, F>)>::read(reader, args)?; + Ok((blockhash, Arc::new(chan_manager))) + } +} + +impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: Deref> + ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F>> for (Sha256dHash, ChannelManager<ChanSigner, M, T, K, F>) + where M::Target: ManyChannelMonitor<ChanSigner>, + T::Target: BroadcasterInterface, + K::Target: KeysInterface<ChanKeySigner = ChanSigner>, + F::Target: FeeEstimator, +{ + fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F>) -> Result<Self, DecodeError> { let _ver: u8 = Readable::read(reader)?; let min_ver: u8 = Readable::read(reader)?; if min_ver > SERIALIZATION_VERSION { @@ -3334,7 +3402,7 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable, M: Deref, T let latest_block_height: u32 = Readable::read(reader)?; let last_block_hash: Sha256dHash = Readable::read(reader)?; - let mut closed_channels = Vec::new(); + let mut failed_htlcs = Vec::new(); let channel_count: u64 = Readable::read(reader)?; let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); @@ -3349,12 +3417,20 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable, M: Deref, T let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?; funding_txo_set.insert(funding_txo.clone()); if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { - if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() || - channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() || - channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() { - let mut force_close_res = channel.force_shutdown(); - force_close_res.0 = monitor.get_latest_local_commitment_txn(); - closed_channels.push(force_close_res); + if channel.get_cur_local_commitment_transaction_number() < monitor.get_cur_local_commitment_number() || + channel.get_revoked_remote_commitment_transaction_number() < monitor.get_min_seen_secret() || + channel.get_cur_remote_commitment_transaction_number() < monitor.get_cur_remote_commitment_number() || + channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() { + // If the channel is ahead 
of the monitor, return InvalidValue: + return Err(DecodeError::InvalidValue); + } else if channel.get_cur_local_commitment_transaction_number() > monitor.get_cur_local_commitment_number() || + channel.get_revoked_remote_commitment_transaction_number() > monitor.get_min_seen_secret() || + channel.get_cur_remote_commitment_transaction_number() > monitor.get_cur_remote_commitment_number() || + channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() { + // But if the channel is behind the monitor, close the channel: + let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true); + failed_htlcs.append(&mut new_failed_htlcs); + monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster); } else { if let Some(short_channel_id) = channel.get_short_channel_id() { short_to_id.insert(short_channel_id, channel.channel_id()); @@ -3368,7 +3444,7 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable, M: Deref, T for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() { if !funding_txo_set.contains(funding_txo) { - closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new())); + monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster); } } @@ -3406,6 +3482,8 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable, M: Deref, T per_peer_state.insert(peer_pubkey, Mutex::new(peer_state)); } + let last_node_announcement_serial: u32 = Readable::read(reader)?; + let channel_manager = ChannelManager { genesis_hash, fee_estimator: args.fee_estimator, @@ -3425,6 +3503,8 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable, M: Deref, T }), our_network_key: args.keys_manager.get_node_secret(), + last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize), + per_peer_state: RwLock::new(per_peer_state), pending_events: Mutex::new(Vec::new()), @@ -3434,12 +3514,13 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable, M: Deref, T default_configuration: args.default_config, }; - for close_res in closed_channels.drain(..) { - channel_manager.finish_force_close_channel(close_res); - //TODO: Broadcast channel update for closed channels, but only after we've made a - //connection or two. + for htlc_source in failed_htlcs.drain(..) { + channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } + //TODO: Broadcast channel update for closed channels, but only after we've made a + //connection or two. + Ok((last_block_hash.clone(), channel_manager)) } }
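A note on the reworked handle_error! macro: it now takes the channel_state lock itself, and only when it actually has message events to push, so callers must not hold that lock when invoking it; the new debug_assertions block enforces exactly that. A minimal standalone sketch of the same guard, assuming std's Mutex::try_lock fails when the lock is already held (the function name is illustrative, not from the patch):

use std::sync::Mutex;

// Debug-only check mirroring the assertion added to handle_error!: verify the
// state lock is not already held on entry, since the macro will take it itself.
fn debug_assert_not_held<T>(state: &Mutex<T>) {
	#[cfg(debug_assertions)]
	assert!(state.try_lock().is_ok(), "state lock must not be held when entering handle_error!");
	#[cfg(not(debug_assertions))]
	let _ = state;
}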
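The node_announcement timestamp handling above deliberately avoids a wall clock: last_node_announcement_serial only ever moves forward, incremented by fetch_add on each broadcast_node_announcement and raised to each connected block's timestamp via a compare-exchange loop in block_connected. A self-contained sketch of that loop (function name ours, not part of the patch):

use std::sync::atomic::{AtomicUsize, Ordering};

// Raise `serial` to `block_time` unless it is already ahead; retry if another
// thread raced us, exactly as in block_connected above.
fn bump_announcement_serial(serial: &AtomicUsize, block_time: u32) {
	loop {
		let old = serial.load(Ordering::Acquire);
		if old >= block_time as usize { break; }
		if serial.compare_exchange(old, block_time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
			break;
		}
	}
}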
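The deserialization hunk at the end replaces the old "clone the monitor's latest local commitment txn" handling with a three-way reconciliation between each Channel and its ChannelMonitor. Reduced to plain update counters (illustrative names, not the patch's API), the decision is:

// Outcome of reconciling a deserialized channel with its monitor.
enum LoadAction { Resume, ForceClose, Invalid }

// A channel strictly ahead of its monitor claims state the monitor never saw,
// so the serialized data is unusable (DecodeError::InvalidValue above). A
// channel behind its monitor is stale: force-shut it and let the monitor
// broadcast the latest local commitment transaction, failing the pending
// HTLCs backwards. Only an exact match resumes normally.
fn reconcile(channel_update_id: u64, monitor_update_id: u64) -> LoadAction {
	if channel_update_id > monitor_update_id {
		LoadAction::Invalid
	} else if channel_update_id < monitor_update_id {
		LoadAction::ForceClose
	} else {
		LoadAction::Resume
	}
}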