X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=26d9dfed0d162d5d7fb9853e68ae476d6083348b;hb=649af07205748358c6996a848e7e399e6be31d88;hp=a923d397be2edc662f801d18f81eb2c100886cee;hpb=e7facb1b66a7b3bfdd3dedd41739dcedd8de4b11;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index a923d397..26d9dfed 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -39,7 +39,7 @@ use util::events::ClosureReason;
 use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
 use util::logger::Logger;
 use util::errors::APIError;
-use util::config::{UserConfig,ChannelConfig};
+use util::config::{UserConfig, ChannelConfig, ChannelHandshakeLimits};
 use util::scid_utils::scid_from_parts;
 
 use io;
@@ -305,6 +305,26 @@ pub(super) enum ChannelUpdateStatus {
 	Disabled,
 }
 
+/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
+#[derive(PartialEq)]
+pub enum AnnouncementSigsState {
+	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
+	/// we sent the last `AnnouncementSignatures`.
+	NotSent,
+	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
+	/// This state never appears on disk - instead we write `NotSent`.
+	MessageSent,
+	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
+	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
+	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
+	/// they send back a `RevokeAndACK`.
+	/// This state never appears on disk - instead we write `NotSent`.
+	Committed,
+	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
+	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
+	PeerReceived,
+}
+
 /// An enum indicating whether the local or remote side offered a given HTLC.
 enum HTLCInitiator {
 	LocalOffered,
@@ -399,6 +419,7 @@ pub(super) struct MonitorRestoreUpdates {
 	pub finalized_claimed_htlcs: Vec<HTLCSource>,
 	pub funding_broadcastable: Option<Transaction>,
 	pub funding_locked: Option<msgs::FundingLocked>,
+	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
 }
 
 /// The return value of `channel_reestablish`
@@ -409,6 +430,7 @@ pub(super) struct ReestablishResponses {
 	pub order: RAACommitmentOrder,
 	pub mon_update: Option<ChannelMonitorUpdate>,
 	pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
+	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
 	pub shutdown_msg: Option<msgs::Shutdown>,
 }
 
@@ -462,10 +484,25 @@ pub(super) struct Channel<Signer: Sign> {
 	#[cfg(not(any(test, feature = "_test_utils")))]
 	config: ChannelConfig,
 
+	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
+
 	user_id: u64,
 
 	channel_id: [u8; 32],
 	channel_state: u32,
+
+	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
+	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
+	// next connect.
+	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
+	// Note that a number of our tests were written prior to the behavior here which retransmits
+	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
+	// many tests.
+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(crate) announcement_sigs_state: AnnouncementSigsState,
+	#[cfg(not(any(test, feature = "_test_utils")))]
+	announcement_sigs_state: AnnouncementSigsState,
+
 	secp_ctx: Secp256k1<secp256k1::All>,
 	channel_value_satoshis: u64,
 
@@ -800,9 +837,11 @@ impl<Signer: Sign> Channel<Signer> {
 		Ok(Channel {
 			user_id,
 			config: config.channel_options.clone(),
+			inbound_handshake_limits_override: Some(config.peer_channel_config_limits.clone()),
 
 			channel_id: keys_provider.get_secure_random_bytes(),
 			channel_state: ChannelState::OurInitSent as u32,
+			announcement_sigs_state: AnnouncementSigsState::NotSent,
 			secp_ctx,
 			channel_value_satoshis,
 
@@ -906,14 +945,6 @@ impl<Signer: Sign> Channel<Signer> {
 	fn check_remote_fee<F: Deref>(fee_estimator: &F, feerate_per_kw: u32) -> Result<(), ChannelError>
 		where F::Target: FeeEstimator
 	{
-		let lower_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
-		// Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
-		// occasional issues with feerate disagreements between an initiator that wants a feerate
-		// of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
-		// sat/kw before the comparison here.
-		if feerate_per_kw + 250 < lower_limit {
-			return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
-		}
 		// We only bound the fee updates on the upper side to prevent completely absurd feerates,
 		// always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
 		// We generally don't care too much if they set the feerate to something very high, but it
@@ -923,6 +954,14 @@ impl<Signer: Sign> Channel<Signer> {
 		if feerate_per_kw as u64 > upper_limit {
 			return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
 		}
+		let lower_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
+		// Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
+		// occasional issues with feerate disagreements between an initiator that wants a feerate
+		// of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
+		// sat/kw before the comparison here.
+		if feerate_per_kw + 250 < lower_limit {
+			return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
+		}
 		Ok(())
 	}
 
@@ -1098,9 +1137,11 @@ impl<Signer: Sign> Channel<Signer> {
 		let chan = Channel {
 			user_id,
 			config: local_config,
+			inbound_handshake_limits_override: None,
 
 			channel_id: msg.temporary_channel_id,
 			channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
+			announcement_sigs_state: AnnouncementSigsState::NotSent,
 			secp_ctx,
 
 			latest_monitor_update_id: 0,
@@ -1774,7 +1815,9 @@ impl<Signer: Sign> Channel<Signer> {
 
 	// Message handlers:
 
-	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_features: &InitFeatures) -> Result<(), ChannelError> {
+	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
+		let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits };
+
 		// Check sanity of message fields:
 		if !self.is_outbound() {
 			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
@@ -1795,7 +1838,7 @@ impl<Signer: Sign> Channel<Signer> {
 		if msg.htlc_minimum_msat >= full_channel_value_msat {
 			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
 		}
-		let max_delay_acceptable = u16::min(config.peer_channel_config_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
 		if msg.to_self_delay > max_delay_acceptable {
 			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
 		}
@@ -1807,17 +1850,17 @@ impl<Signer: Sign> Channel<Signer> {
 		}
 
 		// Now check against optional parameters as set by config...
-		if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
-			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.peer_channel_config_limits.max_htlc_minimum_msat)));
+		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
 		}
-		if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
-			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat)));
+		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
 		}
-		if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
-			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.peer_channel_config_limits.max_channel_reserve_satoshis)));
+		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
 		}
-		if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
-			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs)));
+		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
 		}
 		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
 			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
@@ -1825,8 +1868,8 @@ impl<Signer: Sign> Channel<Signer> {
 		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
 			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
 		}
-		if msg.minimum_depth > config.peer_channel_config_limits.max_minimum_depth {
-			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", config.peer_channel_config_limits.max_minimum_depth, msg.minimum_depth)));
+		if msg.minimum_depth > peer_limits.max_minimum_depth {
+			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
 		}
 		if msg.minimum_depth == 0 {
 			// Note that if this changes we should update the serialization minimum version to
@@ -1879,6 +1922,7 @@ impl<Signer: Sign> Channel<Signer> {
 		self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
 
 		self.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
+		self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
 
 		Ok(())
 	}
@@ -2068,7 +2112,10 @@ impl<Signer: Sign> Channel<Signer> {
 		Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap()))
 	}
 
-	pub fn funding_locked<L: Deref>(&mut self, msg: &msgs::FundingLocked, logger: &L) -> Result<(), ChannelError> where L::Target: Logger {
+	/// Handles a funding_locked message from our peer. If we've already sent our funding_locked
+	/// and the channel is now usable (and public), this may generate an announcement_signatures to
+	/// reply with.
+	pub fn funding_locked<L: Deref>(&mut self, msg: &msgs::FundingLocked, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock, logger: &L) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError> where L::Target: Logger {
 		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
 			self.workaround_lnd_bug_4006 = Some(msg.clone());
 			return Err(ChannelError::Ignore("Peer sent funding_locked when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
@@ -2092,7 +2139,7 @@ impl<Signer: Sign> Channel<Signer> {
 				return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point".to_owned()));
 			}
 			// They probably disconnected/reconnected and re-sent the funding_locked, which is required
-			return Ok(());
+			return Ok(None);
 		} else {
 			return Err(ChannelError::Close("Peer sent a funding_locked at a strange time".to_owned()));
 		}
@@ -2102,7 +2149,7 @@ impl<Signer: Sign> Channel<Signer> {
 
 		log_info!(logger, "Received funding_locked from peer for channel {}", log_bytes!(self.channel_id()));
 
-		Ok(())
+		Ok(self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger))
 	}
 
 	/// Returns transaction if there is pending funding transaction that is yet to broadcast
@@ -2992,6 +3039,10 @@ impl<Signer: Sign> Channel<Signer> {
 		self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
 		self.cur_counterparty_commitment_transaction_number -= 1;
 
+		if self.announcement_sigs_state == AnnouncementSigsState::Committed {
+			self.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+		}
+
 		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id()));
 		let mut to_forward_infos = Vec::new();
 		let mut revoked_htlcs = Vec::new();
@@ -3268,6 +3319,11 @@ impl<Signer: Sign> Channel<Signer> {
 			self.channel_state = ChannelState::ShutdownComplete as u32;
 			return;
 		}
+
+		if self.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.announcement_sigs_state == AnnouncementSigsState::Committed {
+			self.announcement_sigs_state = AnnouncementSigsState::NotSent;
+		}
+
 		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
 		// will be retransmitted.
 		self.last_sent_closing_fee = None;
@@ -3344,7 +3400,7 @@ impl<Signer: Sign> Channel<Signer> {
 	/// Indicates that the latest ChannelMonitor update has been committed by the client
 	/// successfully and we should restore normal operation. Returns messages which should be sent
 	/// to the remote side.
-	pub fn monitor_updating_restored<L: Deref>(&mut self, logger: &L) -> MonitorRestoreUpdates where L::Target: Logger {
+	pub fn monitor_updating_restored<L: Deref>(&mut self, logger: &L, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block_height: u32) -> MonitorRestoreUpdates where L::Target: Logger {
 		assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
 		self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
 
@@ -3367,6 +3423,8 @@ impl<Signer: Sign> Channel<Signer> {
 			})
 		} else { None };
 
+		let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, best_block_height, logger);
+
 		let mut accepted_htlcs = Vec::new();
 		mem::swap(&mut accepted_htlcs, &mut self.monitor_pending_forwards);
 		let mut failed_htlcs = Vec::new();
@@ -3379,7 +3437,7 @@ impl<Signer: Sign> Channel<Signer> {
 			self.monitor_pending_commitment_signed = false;
 			return MonitorRestoreUpdates {
 				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
-				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked
+				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked, announcement_sigs
 			};
 		}
 
@@ -3398,7 +3456,7 @@ impl<Signer: Sign> Channel<Signer> {
 			if commitment_update.is_some() { "a" } else { "no" },
 			if raa.is_some() { "an" } else { "no" },
 			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
 		MonitorRestoreUpdates {
-			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked
+			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked, announcement_sigs
 		}
 	}
 
@@ -3512,7 +3570,9 @@ impl<Signer: Sign> Channel<Signer> {
 
 	/// May panic if some calls other than message-handling calls (which will all Err immediately)
 	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
-	pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L) -> Result<ReestablishResponses, ChannelError> where L::Target: Logger {
+	pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L,
+		node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock)
+	-> Result<ReestablishResponses, ChannelError> where L::Target: Logger {
 		if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
 			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
 			// almost certainly indicates we are going to end up out-of-sync in some way, so we
@@ -3556,6 +3616,8 @@ impl<Signer: Sign> Channel<Signer> {
 			})
 		} else { None };
 
+		let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger);
+
 		if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
 			// If we're waiting on a monitor update, we shouldn't re-send any funding_locked's.
 			if self.channel_state & (ChannelState::OurFundingLocked as u32) == 0 ||
@@ -3569,7 +3631,7 @@ impl<Signer: Sign> Channel<Signer> {
 					raa: None, commitment_update: None, mon_update: None,
 					order: RAACommitmentOrder::CommitmentFirst,
 					holding_cell_failed_htlcs: Vec::new(),
-					shutdown_msg
+					shutdown_msg, announcement_sigs,
 				});
 			}
 
@@ -3583,7 +3645,7 @@ impl<Signer: Sign> Channel<Signer> {
 				raa: None, commitment_update: None, mon_update: None,
 				order: RAACommitmentOrder::CommitmentFirst,
 				holding_cell_failed_htlcs: Vec::new(),
-				shutdown_msg
+				shutdown_msg, announcement_sigs,
 			});
 		}
 
@@ -3635,7 +3697,7 @@ impl<Signer: Sign> Channel<Signer> {
 					panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
 				Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
 					Ok(ReestablishResponses {
-						funding_locked, shutdown_msg,
+						funding_locked, shutdown_msg, announcement_sigs,
 						raa: required_revoke,
 						commitment_update: Some(commitment_update),
 						order: self.resend_order.clone(),
@@ -3645,7 +3707,7 @@ impl<Signer: Sign> Channel<Signer> {
 				},
 				Ok((None, holding_cell_failed_htlcs)) => {
 					Ok(ReestablishResponses {
-						funding_locked, shutdown_msg,
+						funding_locked, shutdown_msg, announcement_sigs,
 						raa: required_revoke,
 						commitment_update: None,
 						order: self.resend_order.clone(),
@@ -3656,7 +3718,7 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		} else {
 			Ok(ReestablishResponses {
-				funding_locked, shutdown_msg,
+				funding_locked, shutdown_msg, announcement_sigs,
 				raa: required_revoke,
 				commitment_update: None,
 				order: self.resend_order.clone(),
@@ -3674,14 +3736,14 @@ impl<Signer: Sign> Channel<Signer> {
 		if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
 			self.monitor_pending_commitment_signed = true;
 			Ok(ReestablishResponses {
-				funding_locked, shutdown_msg,
+				funding_locked, shutdown_msg, announcement_sigs,
 				commitment_update: None, raa: None, mon_update: None,
 				order: self.resend_order.clone(),
 				holding_cell_failed_htlcs: Vec::new(),
 			})
 		} else {
 			Ok(ReestablishResponses {
-				funding_locked, shutdown_msg,
+				funding_locked, shutdown_msg, announcement_sigs,
 				raa: required_revoke,
 				commitment_update: Some(self.get_last_commitment_update(logger)),
 				order: self.resend_order.clone(),
@@ -4360,11 +4422,12 @@ impl<Signer: Sign> Channel<Signer> {
 	/// When a transaction is confirmed, we check whether it is or spends the funding transaction
 	/// In the first case, we store the confirmation height and calculating the short channel id.
 	/// In the second, we simply return an Err indicating we need to be force-closed now.
-	pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, logger: &L)
-	-> Result<Option<msgs::FundingLocked>, ClosureReason> where L::Target: Logger {
+	pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32,
+		txdata: &TransactionData, genesis_block_hash: BlockHash, node_pk: PublicKey, logger: &L)
+	-> Result<(Option<msgs::FundingLocked>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
 		let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
-		for &(index_in_block, tx) in txdata.iter() {
-			if let Some(funding_txo) = self.get_funding_txo() {
+		if let Some(funding_txo) = self.get_funding_txo() {
+			for &(index_in_block, tx) in txdata.iter() {
 				// If we haven't yet sent a funding_locked, but are in FundingSent (ignoring
 				// whether they've sent a funding_locked or not), check if we should send one.
 				if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
@@ -4408,7 +4471,8 @@ impl<Signer: Sign> Channel<Signer> {
 					// may have already happened for this block).
 					if let Some(funding_locked) = self.check_get_funding_locked(height) {
 						log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
-						return Ok(Some(funding_locked));
+						let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, height, logger);
+						return Ok((Some(funding_locked), announcement_sigs));
 					}
 				}
 				for inp in tx.input.iter() {
@@ -4419,7 +4483,7 @@ impl<Signer: Sign> Channel<Signer> {
 				}
 			}
 		}
-		Ok(None)
+		Ok((None, None))
 	}
 
 	/// When a new block is connected, we check the height of the block against outbound holding
@@ -4433,8 +4497,13 @@ impl<Signer: Sign> Channel<Signer> {
 	///
 	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
 	/// back.
-	pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, logger: &L)
-	-> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), ClosureReason> where L::Target: Logger {
+	pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash, node_pk: PublicKey, logger: &L)
+	-> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
+		self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_pk)), logger)
+	}
+
+	fn do_best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, genesis_node_pk: Option<(BlockHash, PublicKey)>, logger: &L)
+	-> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
 		let mut timed_out_htlcs = Vec::new();
 		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
 		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
@@ -4455,8 +4524,11 @@ impl<Signer: Sign> Channel<Signer> {
 		self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
 
 		if let Some(funding_locked) = self.check_get_funding_locked(height) {
+			let announcement_sigs = if let Some((genesis_block_hash, node_pk)) = genesis_node_pk {
+				self.get_announcement_sigs(node_pk, genesis_block_hash, height, logger)
+			} else { None };
 			log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
-			return Ok((Some(funding_locked), timed_out_htlcs));
+			return Ok((Some(funding_locked), timed_out_htlcs, announcement_sigs));
 		}
 
 		let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
@@ -4488,7 +4560,10 @@ impl<Signer: Sign> Channel<Signer> {
 			return Err(ClosureReason::FundingTimedOut);
 		}
 
-		Ok((None, timed_out_htlcs))
+		let announcement_sigs = if let Some((genesis_block_hash, node_pk)) = genesis_node_pk {
+			self.get_announcement_sigs(node_pk, genesis_block_hash, height, logger)
+		} else { None };
+		Ok((None, timed_out_htlcs, announcement_sigs))
 	}
 
 	/// Indicates the funding transaction is no longer confirmed in the main chain. This may
@@ -4503,10 +4578,11 @@ impl<Signer: Sign> Channel<Signer> {
 		// larger. If we don't know that time has moved forward, we can just set it to the last
 		// time we saw and it will be ignored.
 		let best_time = self.update_time_counter;
-		match self.best_block_updated(reorg_height, best_time, logger) {
-			Ok((funding_locked, timed_out_htlcs)) => {
+		match self.do_best_block_updated(reorg_height, best_time, None, logger) {
+			Ok((funding_locked, timed_out_htlcs, announcement_sigs)) => {
 				assert!(funding_locked.is_none(), "We can't generate a funding with 0 confirmations?");
 				assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
+				assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
 				Ok(())
 			},
 			Err(e) => Err(e)
@@ -4656,8 +4732,8 @@ impl<Signer: Sign> Channel<Signer> {
 
 	/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
 	/// announceable and available for use (have exchanged FundingLocked messages in both
-	/// directions). Should be used for both loose and in response to an AnnouncementSignatures
-	/// message from the remote peer.
+	/// directions). Should be used for both broadcasted announcements and in response to an
+	/// AnnouncementSignatures message from the remote peer.
 	///
 	/// Will only fail if we're not in a state where channel_announcement may be sent (including
 	/// closing).
@@ -4690,12 +4766,43 @@ impl<Signer: Sign> Channel<Signer> {
 		Ok(msg)
 	}
 
-	pub fn get_announcement_sigs(&self, node_pk: PublicKey, genesis_block_hash: BlockHash) -> Result<msgs::AnnouncementSignatures, ChannelError> {
-		let announcement = self.get_channel_announcement(node_pk, genesis_block_hash)?;
-		let (our_node_sig, our_bitcoin_sig) = self.holder_signer.sign_channel_announcement(&announcement, &self.secp_ctx)
-			.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
+	fn get_announcement_sigs<L: Deref>(&mut self, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block_height: u32, logger: &L)
+	-> Option<msgs::AnnouncementSignatures> where L::Target: Logger {
+		if self.funding_tx_confirmation_height == 0 || self.funding_tx_confirmation_height + 5 > best_block_height {
+			return None;
+		}
+
+		if !self.is_usable() {
+			return None;
+		}
+
+		if self.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
+			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
+			return None;
+		}
+
+		if self.announcement_sigs_state != AnnouncementSigsState::NotSent {
+			return None;
+		}
 
-		Ok(msgs::AnnouncementSignatures {
+		log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.channel_id()));
+		let announcement = match self.get_channel_announcement(node_pk, genesis_block_hash) {
+			Ok(a) => a,
+			Err(_) => {
+				log_trace!(logger, "Cannot create an announcement_signatures as channel is not public.");
+				return None;
+			}
+		};
+		let (our_node_sig, our_bitcoin_sig) = match self.holder_signer.sign_channel_announcement(&announcement, &self.secp_ctx) {
+			Err(_) => {
+				log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
+				return None;
+			},
+			Ok(v) => v
+		};
+		self.announcement_sigs_state = AnnouncementSigsState::MessageSent;
+
+		Some(msgs::AnnouncementSignatures {
 			channel_id: self.channel_id(),
 			short_channel_id: self.get_short_channel_id().unwrap(),
 			node_signature: our_node_sig,
@@ -4726,7 +4833,7 @@ impl<Signer: Sign> Channel<Signer> {
 	/// Processes an incoming announcement_signatures message, providing a fully-signed
 	/// channel_announcement message which we can broadcast and storing our counterparty's
 	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
-	pub fn announcement_signatures(&mut self, our_node_id: PublicKey, chain_hash: BlockHash, msg: &msgs::AnnouncementSignatures) -> Result<msgs::ChannelAnnouncement, ChannelError> {
+	pub fn announcement_signatures(&mut self, our_node_id: PublicKey, chain_hash: BlockHash, best_block_height: u32, msg: &msgs::AnnouncementSignatures) -> Result<msgs::ChannelAnnouncement, ChannelError> {
 		let announcement = self.get_channel_announcement(our_node_id.clone(), chain_hash)?;
 
 		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
@@ -4743,13 +4850,20 @@ impl<Signer: Sign> Channel<Signer> {
 		}
 
 		self.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
+		if self.funding_tx_confirmation_height == 0 || self.funding_tx_confirmation_height + 5 > best_block_height {
+			return Err(ChannelError::Ignore(
+				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
+		}
 
 		self.sign_channel_announcement(our_node_id, announcement)
 	}
 
 	/// Gets a signed channel_announcement for this channel, if we previously received an
 	/// announcement_signatures from our counterparty.
-	pub fn get_signed_channel_announcement(&self, our_node_id: PublicKey, chain_hash: BlockHash) -> Option<msgs::ChannelAnnouncement> {
+	pub fn get_signed_channel_announcement(&self, our_node_id: PublicKey, chain_hash: BlockHash, best_block_height: u32) -> Option<msgs::ChannelAnnouncement> {
+		if self.funding_tx_confirmation_height == 0 || self.funding_tx_confirmation_height + 5 > best_block_height {
+			return None;
+		}
 		let announcement = match self.get_channel_announcement(our_node_id.clone(), chain_hash) {
 			Ok(res) => res,
 			Err(_) => return None,
@@ -5030,6 +5144,10 @@ impl<Signer: Sign> Channel<Signer> {
 			Err(e) => return Err(e),
 		};
 
+		if self.announcement_sigs_state == AnnouncementSigsState::MessageSent {
+			self.announcement_sigs_state = AnnouncementSigsState::Committed;
+		}
+
 		self.latest_monitor_update_id += 1;
 		let monitor_update = ChannelMonitorUpdate {
 			update_id: self.latest_monitor_update_id,
@@ -5289,6 +5407,29 @@ impl Readable for ChannelUpdateStatus {
 	}
 }
 
+impl Writeable for AnnouncementSigsState {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+		// We only care about writing out the current state as if we had just disconnected, at
+		// which point we always set anything but AnnouncementSigsReceived to NotSent.
+		match self {
+			AnnouncementSigsState::NotSent => 0u8.write(writer),
+			AnnouncementSigsState::MessageSent => 0u8.write(writer),
+			AnnouncementSigsState::Committed => 0u8.write(writer),
+			AnnouncementSigsState::PeerReceived => 1u8.write(writer),
+		}
+	}
+}
+
+impl Readable for AnnouncementSigsState {
+	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		Ok(match <u8 as Readable>::read(reader)? {
+			0 => AnnouncementSigsState::NotSent,
+			1 => AnnouncementSigsState::PeerReceived,
+			_ => return Err(DecodeError::InvalidValue),
+		})
+	}
+}
+
 impl<Signer: Sign> Writeable for Channel<Signer> {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
@@ -5557,6 +5698,7 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
 			(11, self.monitor_pending_finalized_fulfills, vec_type),
 			(13, self.channel_creation_height, required),
 			(15, preimages, vec_type),
+			(17, self.announcement_sigs_state, required),
 		});
 
 		Ok(())
@@ -5809,6 +5951,10 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<Signer>
 		let mut channel_creation_height = Some(serialized_height);
 		let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
 
+		// If we read an old Channel, for simplicity we just treat it as "we never sent an
+		// AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
+		let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
+
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
 			(1, minimum_depth, option),
@@ -5822,6 +5968,7 @@
 			(11, monitor_pending_finalized_fulfills, vec_type),
 			(13, channel_creation_height, option),
 			(15, preimages_opt, vec_type),
+			(17, announcement_sigs_state, option),
 		});
 
 		if let Some(preimages) = preimages_opt {
@@ -5862,8 +6009,14 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<Signer>
 			user_id,
 
 			config: config.unwrap(),
+
+			// Note that we don't care about serializing handshake limits as we only ever serialize
+			// channel data after the handshake has completed.
+			inbound_handshake_limits_override: None,
+
 			channel_id,
 			channel_state,
+			announcement_sigs_state: announcement_sigs_state.unwrap(),
 			secp_ctx,
 			channel_value_satoshis,
 
@@ -6013,6 +6166,13 @@ mod tests {
 			"MAX_FUNDING_SATOSHIS is greater than all satoshis in existence");
 	}
 
+	#[test]
+	fn test_no_fee_check_overflow() {
+		// Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
+		// arithmetic, causing a panic with debug assertions enabled.
+		assert!(Channel::<InMemorySigner>::check_remote_fee(&&TestFeeEstimator { fee_est: 42 }, u32::max_value()).is_err());
+	}
+
 	struct Keys {
 		signer: InMemorySigner,
 	}
@@ -6122,7 +6282,7 @@ mod tests {
 		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
 		let mut accept_channel_msg = node_b_chan.get_accept_channel();
 		accept_channel_msg.dust_limit_satoshis = 546;
-		node_a_chan.accept_channel(&accept_channel_msg, &config, &InitFeatures::known()).unwrap();
+		node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
 		node_a_chan.holder_dust_limit_satoshis = 1560;
 
 		// Put some inbound and outbound HTLCs in A's channel.
@@ -6239,7 +6399,7 @@ mod tests {
 
 		// Node B --> Node A: accept channel
 		let accept_channel_msg = node_b_chan.get_accept_channel();
-		node_a_chan.accept_channel(&accept_channel_msg, &config, &InitFeatures::known()).unwrap();
+		node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
 
 		// Node A --> Node B: funding created
 		let output_script = node_a_chan.get_funding_redeemscript();
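The `AnnouncementSigsState` enum added by this patch encodes a small send/ack state machine. The following self-contained Rust sketch is illustrative only, not part of the patch and not LDK API; the `Tracker` type and its driver methods are hypothetical names used to walk through the transitions the patch describes: the state becomes `MessageSent` when announcement_signatures goes out, `Committed` once a commitment_signed follows it, `PeerReceived` when the peer's revoke_and_ack comes back, and anything short of `PeerReceived` falls back to `NotSent` on disconnect, which is also why only `PeerReceived` survives serialization above.

// Illustrative sketch of the AnnouncementSigsState lifecycle described in the patch.
// The enum mirrors the patch; Tracker and its methods are hypothetical, not LDK API.
#[derive(Clone, Copy, Debug, PartialEq)]
enum AnnouncementSigsState {
	NotSent,
	MessageSent,
	Committed,
	PeerReceived,
}

struct Tracker { state: AnnouncementSigsState }

impl Tracker {
	fn new() -> Self { Tracker { state: AnnouncementSigsState::NotSent } }

	// We sent announcement_signatures to our peer.
	fn sent_announcement_sigs(&mut self) {
		if self.state == AnnouncementSigsState::NotSent {
			self.state = AnnouncementSigsState::MessageSent;
		}
	}

	// We sent a commitment_signed after the announcement_signatures; an RAA for this
	// commitment will prove the peer received the announcement_signatures.
	fn sent_commitment_signed(&mut self) {
		if self.state == AnnouncementSigsState::MessageSent {
			self.state = AnnouncementSigsState::Committed;
		}
	}

	// The peer sent revoke_and_ack, implicitly ack-ing our announcement_signatures.
	fn received_revoke_and_ack(&mut self) {
		if self.state == AnnouncementSigsState::Committed {
			self.state = AnnouncementSigsState::PeerReceived;
		}
	}

	// On disconnect anything short of PeerReceived goes back to NotSent, so the
	// announcement_signatures gets re-sent on reconnect (this mirrors both the
	// disconnect handling and the Writeable impl in the patch).
	fn peer_disconnected(&mut self) {
		if self.state != AnnouncementSigsState::PeerReceived {
			self.state = AnnouncementSigsState::NotSent;
		}
	}
}

fn main() {
	let mut t = Tracker::new();
	t.sent_announcement_sigs();
	t.peer_disconnected();
	assert_eq!(t.state, AnnouncementSigsState::NotSent); // must re-send after reconnect

	t.sent_announcement_sigs();
	t.sent_commitment_signed();
	t.received_revoke_and_ack();
	t.peer_disconnected();
	assert_eq!(t.state, AnnouncementSigsState::PeerReceived); // no re-send needed
}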