X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannel.rs;h=c1eaad02384793a1770a622790df1c90461324ae;hb=5bb7ba5b7d7f81977705f0ed5652c9924c7a1d56;hp=74728e3a96fd7f0290aae550c9909a8a4b977469;hpb=bb094f1e30ad449149b78edcf294ff99bbd2d8d9;p=rust-lightning diff --git a/src/ln/channel.rs b/src/ln/channel.rs index 74728e3a..c1eaad02 100644 --- a/src/ln/channel.rs +++ b/src/ln/channel.rs @@ -16,9 +16,9 @@ use secp256k1::{Secp256k1,Signature}; use secp256k1; use ln::msgs; -use ln::msgs::{DecodeError, OptionalField}; +use ln::msgs::{DecodeError, OptionalField, LocalFeatures, DataLossProtect}; use ln::channelmonitor::ChannelMonitor; -use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash}; +use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT}; use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment,HTLC_SUCCESS_TX_WEIGHT,HTLC_TIMEOUT_TX_WEIGHT}; use ln::chan_utils; use chain::chaininterface::{FeeEstimator,ConfirmationTarget}; @@ -32,8 +32,7 @@ use util::config::{UserConfig,ChannelConfig}; use std; use std::default::Default; -use std::{cmp,mem}; -use std::time::Instant; +use std::{cmp,mem,fmt}; use std::sync::{Arc}; #[cfg(test)] @@ -133,14 +132,13 @@ struct OutboundHTLCOutput { /// See AwaitingRemoteRevoke ChannelState for more info enum HTLCUpdateAwaitingACK { - AddHTLC { + AddHTLC { // TODO: Time out if we're getting close to cltv_expiry // always outbound amount_msat: u64, cltv_expiry: u32, payment_hash: PaymentHash, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, - time_created: Instant, //TODO: Some kind of timeout thing-a-majig }, ClaimHTLC { payment_preimage: PaymentPreimage, @@ -183,9 +181,9 @@ enum ChannelState { /// "disconnected" and no updates are allowed until after we've done a channel_reestablish /// dance. PeerDisconnected = (1 << 7), - /// Flag which is set on ChannelFunded and FundingSent indicating the user has told us they - /// failed to update our ChannelMonitor somewhere and we should pause sending any outbound - /// messages until they've managed to do so. + /// Flag which is set on ChannelFunded, FundingCreated, and FundingSent indicating the user has + /// told us they failed to update our ChannelMonitor somewhere and we should pause sending any + /// outbound messages until they've managed to do so. MonitorUpdateFailed = (1 << 8), /// Flag which implies that we have sent a commitment_signed but are awaiting the responding /// revoke_and_ack message. During this time period, we can't generate new commitment_signed @@ -237,19 +235,22 @@ pub(super) struct Channel { cur_local_commitment_transaction_number: u64, cur_remote_commitment_transaction_number: u64, value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees - /// Upon receipt of a channel_reestablish we have to figure out whether to send a - /// revoke_and_ack first or a commitment update first. Generally, we prefer to send - /// revoke_and_ack first, but if we had a pending commitment update of our own waiting on a - /// remote revoke when we received the latest commitment update from the remote we have to make - /// sure that commitment update gets resent first. 
- received_commitment_while_awaiting_raa: bool, pending_inbound_htlcs: Vec, pending_outbound_htlcs: Vec, holding_cell_htlc_updates: Vec, + /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always + /// need to ensure we resend them in the order we originally generated them. Note that because + /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is + /// sufficient to simply set this to the opposite of any message we are generating as we + /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending + /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should + /// send it first. + resend_order: RAACommitmentOrder, + + monitor_pending_funding_locked: bool, monitor_pending_revoke_and_ack: bool, monitor_pending_commitment_signed: bool, - monitor_pending_order: Option, monitor_pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, @@ -317,7 +318,7 @@ pub(super) struct Channel { their_htlc_minimum_msat: u64, our_htlc_minimum_msat: u64, their_to_self_delay: u16, - //implied by BREAKDOWN_TIMEOUT: our_to_self_delay: u16, + our_to_self_delay: u16, #[cfg(test)] pub their_max_accepted_htlcs: u16, #[cfg(not(test))] @@ -347,14 +348,6 @@ pub const OUR_MAX_HTLCS: u16 = 50; //TODO /// on ice until the funding transaction gets more confirmations, but the LN protocol doesn't /// really allow for this, so instead we're stuck closing it out at that point. const UNCONF_THRESHOLD: u32 = 6; -/// The amount of time we require our counterparty wait to claim their money (ie time between when -/// we, or our watchtower, must check for them having broadcast a theft transaction). -#[cfg(not(test))] -const BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7; //TODO? -#[cfg(test)] -pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7; //TODO? -/// The amount of time we're willing to wait to claim money back to us -const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 14; /// Exposing these two constants for use in test in ChannelMonitor pub const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; @@ -373,10 +366,23 @@ pub const OFFERED_HTLC_SCRIPT_WEIGHT: usize = 133; /// Used to return a simple Error back to ChannelManager. Will get converted to a /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our /// channel_id in ChannelManager. -#[derive(Debug)] pub(super) enum ChannelError { Ignore(&'static str), Close(&'static str), + CloseDelayBroadcast { + msg: &'static str, + update: Option + }, +} + +impl fmt::Debug for ChannelError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + &ChannelError::Ignore(e) => write!(f, "Ignore : {}", e), + &ChannelError::Close(e) => write!(f, "Close : {}", e), + &ChannelError::CloseDelayBroadcast { msg, .. } => write!(f, "CloseDelayBroadcast : {}", msg) + } + } } macro_rules! 
secp_check { @@ -397,7 +403,7 @@ impl Channel { /// Returns a minimum channel reserve value **they** need to maintain /// /// Guaranteed to return a value no larger than channel_value_satoshis - fn get_our_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 { + pub(crate) fn get_our_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 { let (q, _) = channel_value_satoshis.overflowing_div(100); cmp::min(channel_value_satoshis, cmp::max(q, 1000)) //TODO } @@ -410,13 +416,6 @@ impl Channel { 1000 // TODO } - fn derive_minimum_depth(_channel_value_satoshis_msat: u64, _value_to_self_msat: u64) -> u32 { - // Note that in order to comply with BOLT 7 announcement_signatures requirements this must - // be at least 6. - const CONF_TARGET: u32 = 12; //TODO: Should be much higher - CONF_TARGET - } - // Constructors: pub fn new_outbound(fee_estimator: &FeeEstimator, keys_provider: &Arc, their_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, logger: Arc, config: &UserConfig) -> Result { let chan_keys = keys_provider.get_channel_keys(false); @@ -428,6 +427,9 @@ impl Channel { if push_msat > channel_value_satoshis * 1000 { return Err(APIError::APIMisuseError{err: "push value > channel value"}); } + if config.own_channel_config.our_to_self_delay < BREAKDOWN_TIMEOUT { + return Err(APIError::APIMisuseError{err: "Configured with an unreasonable our_to_self_delay putting user funds at risks"}); + } let background_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background); @@ -439,7 +441,7 @@ impl Channel { let secp_ctx = Secp256k1::new(); let channel_monitor = ChannelMonitor::new(&chan_keys.revocation_base_key, &chan_keys.delayed_payment_base_key, - &chan_keys.htlc_base_key, &chan_keys.payment_base_key, &keys_provider.get_shutdown_pubkey(), BREAKDOWN_TIMEOUT, + &chan_keys.htlc_base_key, &chan_keys.payment_base_key, &keys_provider.get_shutdown_pubkey(), config.own_channel_config.our_to_self_delay, keys_provider.get_destination_script(), logger.clone()); Ok(Channel { @@ -457,7 +459,6 @@ impl Channel { cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, value_to_self_msat: channel_value_satoshis * 1000 - push_msat, - received_commitment_while_awaiting_raa: false, pending_inbound_htlcs: Vec::new(), pending_outbound_htlcs: Vec::new(), @@ -468,9 +469,11 @@ impl Channel { next_remote_htlc_id: 0, channel_update_count: 1, + resend_order: RAACommitmentOrder::CommitmentFirst, + + monitor_pending_funding_locked: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_order: None, monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), @@ -496,6 +499,7 @@ impl Channel { their_htlc_minimum_msat: 0, our_htlc_minimum_msat: Channel::derive_our_htlc_minimum_msat(feerate), their_to_self_delay: 0, + our_to_self_delay: config.own_channel_config.our_to_self_delay, their_max_accepted_htlcs: 0, minimum_depth: 0, // Filled in in accept_channel @@ -529,10 +533,14 @@ impl Channel { /// Creates a new channel from a remote sides' request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! 
- pub fn new_from_req(fee_estimator: &FeeEstimator, keys_provider: &Arc, their_node_id: PublicKey, msg: &msgs::OpenChannel, user_id: u64, logger: Arc, config: &UserConfig) -> Result { + pub fn new_from_req(fee_estimator: &FeeEstimator, keys_provider: &Arc, their_node_id: PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel, user_id: u64, logger: Arc, config: &UserConfig) -> Result { let chan_keys = keys_provider.get_channel_keys(true); let mut local_config = (*config).channel_options.clone(); + if config.own_channel_config.our_to_self_delay < BREAKDOWN_TIMEOUT { + return Err(ChannelError::Close("Configured with an unreasonable our_to_self_delay putting user funds at risks")); + } + // Check sanity of message fields: if msg.funding_satoshis >= MAX_FUNDING_SATOSHIS { return Err(ChannelError::Close("funding value > 2^24")); @@ -554,7 +562,7 @@ impl Channel { } Channel::check_remote_fee(fee_estimator, msg.feerate_per_kw)?; - if msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT { + if msg.to_self_delay > config.peer_channel_config_limits.their_to_self_delay || msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT { return Err(ChannelError::Close("They wanted our payments to be delayed by a needlessly long period")); } if msg.max_accepted_htlcs < 1 { @@ -565,32 +573,32 @@ impl Channel { } // Now check against optional parameters as set by config... - if msg.funding_satoshis < config.channel_limits.min_funding_satoshis { + if msg.funding_satoshis < config.peer_channel_config_limits.min_funding_satoshis { return Err(ChannelError::Close("funding satoshis is less than the user specified limit")); } - if msg.htlc_minimum_msat > config.channel_limits.max_htlc_minimum_msat { + if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat { return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit")); } - if msg.max_htlc_value_in_flight_msat < config.channel_limits.min_max_htlc_value_in_flight_msat { + if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat { return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit")); } - if msg.channel_reserve_satoshis > config.channel_limits.max_channel_reserve_satoshis { + if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis { return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit")); } - if msg.max_accepted_htlcs < config.channel_limits.min_max_accepted_htlcs { + if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs { return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit")); } - if msg.dust_limit_satoshis < config.channel_limits.min_dust_limit_satoshis { + if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis { return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit")); } - if msg.dust_limit_satoshis > config.channel_limits.max_dust_limit_satoshis { + if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis { return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit")); } // Convert things into internal flags and prep our state: let their_announce = if (msg.channel_flags & 1) == 1 { true } else { false }; - if config.channel_limits.force_announced_channel_preference { + if 
config.peer_channel_config_limits.force_announced_channel_preference { if local_config.announced_channel != their_announce { return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours")); } } @@ -627,11 +635,32 @@ impl Channel { let secp_ctx = Secp256k1::new(); let mut channel_monitor = ChannelMonitor::new(&chan_keys.revocation_base_key, &chan_keys.delayed_payment_base_key, - &chan_keys.htlc_base_key, &chan_keys.payment_base_key, &keys_provider.get_shutdown_pubkey(), BREAKDOWN_TIMEOUT, + &chan_keys.htlc_base_key, &chan_keys.payment_base_key, &keys_provider.get_shutdown_pubkey(), config.own_channel_config.our_to_self_delay, keys_provider.get_destination_script(), logger.clone()); channel_monitor.set_their_base_keys(&msg.htlc_basepoint, &msg.delayed_payment_basepoint); channel_monitor.set_their_to_self_delay(msg.to_self_delay); + let their_shutdown_scriptpubkey = if their_local_features.supports_upfront_shutdown_script() { + match &msg.shutdown_scriptpubkey { + &OptionalField::Present(ref script) => { + // Peer is signaling upfront_shutdown and has provided an accepted scriptpubkey format. We will enforce it when we receive their shutdown msg + if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wsh() || script.is_v0_p2wpkh() { + Some(script.clone()) + // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything + } else if script.len() == 0 { + None + // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel + } else { + return Err(ChannelError::Close("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format")); + } + }, + // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel + &OptionalField::Absent => { + return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script.
Use 0-length script to opt-out")); + } + } + } else { None }; + let mut chan = Channel { user_id: user_id, config: local_config, @@ -646,7 +675,6 @@ impl Channel { cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, value_to_self_msat: msg.push_msat, - received_commitment_while_awaiting_raa: false, pending_inbound_htlcs: Vec::new(), pending_outbound_htlcs: Vec::new(), @@ -657,9 +685,11 @@ impl Channel { next_remote_htlc_id: 0, channel_update_count: 1, + resend_order: RAACommitmentOrder::CommitmentFirst, + + monitor_pending_funding_locked: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_order: None, monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), @@ -686,8 +716,9 @@ impl Channel { their_htlc_minimum_msat: msg.htlc_minimum_msat, our_htlc_minimum_msat: Channel::derive_our_htlc_minimum_msat(msg.feerate_per_kw as u64), their_to_self_delay: msg.to_self_delay, + our_to_self_delay: config.own_channel_config.our_to_self_delay, their_max_accepted_htlcs: msg.max_accepted_htlcs, - minimum_depth: Channel::derive_minimum_depth(msg.funding_satoshis*1000, msg.push_msat), + minimum_depth: config.own_channel_config.minimum_depth, their_funding_pubkey: Some(msg.funding_pubkey), their_revocation_basepoint: Some(msg.revocation_basepoint), @@ -699,7 +730,7 @@ impl Channel { their_prev_commitment_point: None, their_node_id: their_node_id, - their_shutdown_scriptpubkey: None, + their_shutdown_scriptpubkey, channel_monitor: channel_monitor, @@ -881,16 +912,19 @@ impl Channel { } } - let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset; - let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000 - self.value_to_self_msat - remote_htlc_total_msat) as i64 - value_to_self_msat_offset; + assert!(value_to_self_msat >= 0); + // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie + // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to + // "violate" their reserve value by couting those against it. Thus, we have to convert + // everything to i64 before subtracting as otherwise we can overflow. + let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset; + assert!(value_to_remote_msat >= 0); #[cfg(debug_assertions)] { // Make sure that the to_self/to_remote is always either past the appropriate // channel_reserve *or* it is making progress towards it. - // TODO: This should happen after fee calculation, but we don't handle that correctly - // yet! 
let mut max_commitment_tx_output = if generated_by_local { self.max_commitment_tx_output_local.lock().unwrap() } else { @@ -913,15 +947,17 @@ impl Channel { let value_to_b = if local { value_to_remote } else { value_to_self }; if value_to_a >= (dust_limit_satoshis as i64) { + log_trace!(self, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a); txouts.push((TxOut { script_pubkey: chan_utils::get_revokeable_redeemscript(&keys.revocation_key, - if local { self.their_to_self_delay } else { BREAKDOWN_TIMEOUT }, + if local { self.their_to_self_delay } else { self.our_to_self_delay }, &keys.a_delayed_payment_key).to_v0_p2wsh(), value: value_to_a as u64 }, None)); } if value_to_b >= (dust_limit_satoshis as i64) { + log_trace!(self, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b); txouts.push((TxOut { script_pubkey: Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0) .push_slice(&Hash160::hash(&keys.b_payment_key.serialize())[..]) @@ -930,7 +966,19 @@ impl Channel { }, None)); } - transaction_utils::sort_outputs(&mut txouts); + transaction_utils::sort_outputs(&mut txouts, |a, b| { + if let &Some(ref a_htlc) = a { + if let &Some(ref b_htlc) = b { + a_htlc.0.cltv_expiry.cmp(&b_htlc.0.cltv_expiry) + // Note that due to hash collisions, we have to have a fallback comparison + // here for fuzztarget mode (otherwise at least chanmon_fail_consistency + // may fail)! + .then(a_htlc.0.payment_hash.0.cmp(&b_htlc.0.payment_hash.0)) + // For non-HTLC outputs, if they're copying our SPK we don't really care if we + // close the channel due to mismatches - they're doing something dumb: + } else { cmp::Ordering::Equal } + } else { cmp::Ordering::Equal } + }); let mut outputs: Vec = Vec::with_capacity(txouts.len()); let mut htlcs_included: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(txouts.len() + included_dust_htlcs.len()); @@ -1006,7 +1054,7 @@ impl Channel { }, ())); } - transaction_utils::sort_outputs(&mut txouts); + transaction_utils::sort_outputs(&mut txouts, |_, _| { cmp::Ordering::Equal }); // Ordering doesnt matter if they used our pubkey... let mut outputs: Vec = Vec::new(); for out in txouts.drain(..) { @@ -1101,7 +1149,7 @@ impl Channel { /// @local is used only to convert relevant internal structures which refer to remote vs local /// to decide value of outputs and direction of HTLCs. 
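// Illustrative sketch, not part of this patch: the sort_outputs signature assumed by
// the two call sites above, which sorts BIP69-style and only consults the caller's
// tie-breaker for outputs with identical value and script. TxOutLike is a stand-in for
// bitcoin::TxOut.
use std::cmp::Ordering;

struct TxOutLike { value: u64, script_pubkey: Vec<u8> }

fn sort_outputs<T, C: Fn(&T, &T) -> Ordering>(outputs: &mut Vec<(TxOutLike, T)>, tie_breaker: C) {
    outputs.sort_unstable_by(|a, b| {
        a.0.value.cmp(&b.0.value)
            .then(a.0.script_pubkey.cmp(&b.0.script_pubkey))
            // Two outputs of equal value and script (e.g. identical-amount HTLCs): fall
            // back to the caller, which above orders by (cltv_expiry, payment_hash) so
            // fuzzing stays deterministic, and uses Ordering::Equal for the closing tx.
            .then_with(|| tie_breaker(&a.1, &b.1))
    });
}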
fn build_htlc_transaction(&self, prev_hash: &Sha256dHash, htlc: &HTLCOutputInCommitment, local: bool, keys: &TxCreationKeys, feerate_per_kw: u64) -> Transaction { - chan_utils::build_htlc_transaction(prev_hash, feerate_per_kw, if local { self.their_to_self_delay } else { BREAKDOWN_TIMEOUT }, htlc, &keys.a_delayed_payment_key, &keys.revocation_key) + chan_utils::build_htlc_transaction(prev_hash, feerate_per_kw, if local { self.their_to_self_delay } else { self.our_to_self_delay }, htlc, &keys.a_delayed_payment_key, &keys.revocation_key) } fn create_htlc_tx_signature(&self, tx: &Transaction, htlc: &HTLCOutputInCommitment, keys: &TxCreationKeys) -> Result<(Script, Signature, bool), ChannelError> { @@ -1329,7 +1377,7 @@ impl Channel { // Message handlers: - pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig) -> Result<(), ChannelError> { + pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_local_features: LocalFeatures) -> Result<(), ChannelError> { // Check sanity of message fields: if !self.channel_outbound { return Err(ChannelError::Close("Got an accept_channel message from an inbound peer")); @@ -1355,7 +1403,7 @@ impl Channel { if msg.htlc_minimum_msat >= (self.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000 { return Err(ChannelError::Close("Minimum htlc value is full channel value")); } - if msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT { + if msg.to_self_delay > config.peer_channel_config_limits.their_to_self_delay || msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT { return Err(ChannelError::Close("They wanted our payments to be delayed by a needlessly long period")); } if msg.max_accepted_htlcs < 1 { @@ -1366,28 +1414,49 @@ impl Channel { } // Now check against optional parameters as set by config... 
- if msg.htlc_minimum_msat > config.channel_limits.max_htlc_minimum_msat { + if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat { return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit")); } - if msg.max_htlc_value_in_flight_msat < config.channel_limits.min_max_htlc_value_in_flight_msat { + if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat { return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit")); } - if msg.channel_reserve_satoshis > config.channel_limits.max_channel_reserve_satoshis { + if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis { return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit")); } - if msg.max_accepted_htlcs < config.channel_limits.min_max_accepted_htlcs { + if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs { return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit")); } - if msg.dust_limit_satoshis < config.channel_limits.min_dust_limit_satoshis { + if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis { return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit")); } - if msg.dust_limit_satoshis > config.channel_limits.max_dust_limit_satoshis { + if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis { return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit")); } - if msg.minimum_depth > config.channel_limits.max_minimum_depth { + if msg.minimum_depth > config.peer_channel_config_limits.max_minimum_depth { return Err(ChannelError::Close("We consider the minimum depth to be unreasonably large")); } + let their_shutdown_scriptpubkey = if their_local_features.supports_upfront_shutdown_script() { + match &msg.shutdown_scriptpubkey { + &OptionalField::Present(ref script) => { + // Peer is signaling upfront_shutdown and has provided an accepted scriptpubkey format. We will enforce it when we receive their shutdown msg + if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wsh() || script.is_v0_p2wpkh() { + Some(script.clone()) + // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything + } else if script.len() == 0 { + None + // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel + } else { + return Err(ChannelError::Close("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format")); + } + }, + // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel + &OptionalField::Absent => { + return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script.
Use 0-length script to opt-out")); + } + } + } else { None }; + self.channel_monitor.set_their_base_keys(&msg.htlc_basepoint, &msg.delayed_payment_basepoint); self.their_dust_limit_satoshis = msg.dust_limit_satoshis; @@ -1403,6 +1472,7 @@ impl Channel { self.their_delayed_payment_basepoint = Some(msg.delayed_payment_basepoint); self.their_htlc_basepoint = Some(msg.htlc_basepoint); self.their_cur_commitment_point = Some(msg.first_per_commitment_point); + self.their_shutdown_scriptpubkey = their_shutdown_scriptpubkey; let obscure_factor = self.get_commitment_transaction_number_obscure_factor(); self.channel_monitor.set_commitment_obscure_factor(obscure_factor); @@ -1484,7 +1554,7 @@ impl Channel { if !self.channel_outbound { return Err(ChannelError::Close("Received funding_signed for an inbound channel?")); } - if self.channel_state != ChannelState::FundingCreated as u32 { + if self.channel_state & !(ChannelState::MonitorUpdateFailed as u32) != ChannelState::FundingCreated as u32 { return Err(ChannelError::Close("Received funding_signed in strange state!")); } if self.channel_monitor.get_min_seen_secret() != (1 << 48) || @@ -1505,10 +1575,14 @@ impl Channel { self.sign_commitment_transaction(&mut local_initial_commitment_tx, &msg.signature); self.channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx.clone(), local_keys, self.feerate_per_kw, Vec::new()); self.last_local_commitment_txn = vec![local_initial_commitment_tx]; - self.channel_state = ChannelState::FundingSent as u32; + self.channel_state = ChannelState::FundingSent as u32 | (self.channel_state & (ChannelState::MonitorUpdateFailed as u32)); self.cur_local_commitment_transaction_number -= 1; - Ok(self.channel_monitor.clone()) + if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 { + Ok(self.channel_monitor.clone()) + } else { + Err(ChannelError::Ignore("Previous monitor update failure prevented funding_signed from allowing funding broadcast")) + } } pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> { @@ -1523,10 +1597,13 @@ impl Channel { } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) { self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS); self.channel_update_count += 1; - } else if self.channel_state & (ChannelState::ChannelFunded as u32) != 0 && - // Note that funding_signed/funding_created will have decremented both by 1! - self.cur_local_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 && - self.cur_remote_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { + } else if (self.channel_state & (ChannelState::ChannelFunded as u32) != 0 && + // Note that funding_signed/funding_created will have decremented both by 1! 
+ self.cur_local_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 && + self.cur_remote_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1) || + // If we reconnected before sending our funding locked they may still resend theirs: + (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) == + (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32)) { if self.their_cur_commitment_point != Some(msg.next_per_commitment_point) { return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point")); } @@ -1569,6 +1646,16 @@ impl Channel { (htlc_outbound_count as u32, htlc_outbound_value_msat) } + /// Get the available (ie not including pending HTLCs) inbound and outbound balance in msat. + /// Doesn't bother handling the + /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC + /// corner case properly. + pub fn get_inbound_outbound_available_balance_msat(&self) -> (u64, u64) { + // Note that we have to handle overflow due to the above case. + (cmp::max(self.channel_value_satoshis as i64 * 1000 - self.value_to_self_msat as i64 - self.get_inbound_pending_htlc_stats().1 as i64, 0) as u64, + cmp::max(self.value_to_self_msat as i64 - self.get_outbound_pending_htlc_stats().1 as i64, 0) as u64) + } + pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_state: PendingHTLCStatus) -> Result<(), ChannelError> { if (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelFunded as u32) { return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state")); } @@ -1587,15 +1674,31 @@ impl Channel { if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 { return Err(ChannelError::Close("Remote tried to push more than our max accepted HTLCs")); } - //TODO: Spec is unclear if this is per-direction or in total (I assume per direction): // Check our_max_htlc_value_in_flight_msat if htlc_inbound_value_msat + msg.amount_msat > Channel::get_our_max_htlc_value_in_flight_msat(self.channel_value_satoshis) { - return Err(ChannelError::Close("Remote HTLC add would put them over their max HTLC value in flight")); + return Err(ChannelError::Close("Remote HTLC add would put them over our max HTLC value")); } // Check our_channel_reserve_satoshis (we're getting paid, so they have to at least meet // the reserve_satoshis we told them to always have as direct payment so that they lose // something if we punish them for broadcasting an old state). - if htlc_inbound_value_msat + msg.amount_msat + self.value_to_self_msat > (self.channel_value_satoshis - Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis)) * 1000 { + // Note that we don't really care about having a small/no to_remote output in our local + // commitment transactions, as the purpose of the channel reserve is to ensure we can + // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be + // present in the next commitment transaction we send them (at least for fulfilled ones, + // failed ones won't modify value_to_self). + // Note that we will send HTLCs which another instance of rust-lightning would think + // violate the reserve value if we do not do this (as we forget inbound HTLCs from the + // Channel state once they will not be present in the next received commitment + // transaction). 
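// Illustrative sketch, not part of this patch: the inbound reserve check described by
// the comment above, written as a single inequality. removed_outbound_msat corresponds
// to the removed_outbound_total_msat accumulated just below; other names are stand-ins.
fn inbound_htlc_respects_our_reserve(channel_value_sat: u64, our_reserve_sat: u64,
                                     value_to_self_msat: u64, pending_inbound_msat: u64,
                                     new_htlc_msat: u64, removed_outbound_msat: u64) -> bool {
    // Everything committed toward us (our balance, pending inbound HTLCs, and the new
    // HTLC) must leave the remote side at least our_reserve_sat, except that outbound
    // HTLCs already fulfilled but still awaiting their final RAA are forgiven.
    pending_inbound_msat + new_htlc_msat + value_to_self_msat
        <= (channel_value_sat - our_reserve_sat) * 1000 + removed_outbound_msat
}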
+ let mut removed_outbound_total_msat = 0; + for ref htlc in self.pending_outbound_htlcs.iter() { + if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(None) = htlc.state { + removed_outbound_total_msat += htlc.amount_msat; + } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(None) = htlc.state { + removed_outbound_total_msat += htlc.amount_msat; + } + } + if htlc_inbound_value_msat + msg.amount_msat + self.value_to_self_msat > (self.channel_value_satoshis - Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis)) * 1000 + removed_outbound_total_msat { return Err(ChannelError::Close("Remote HTLC add would put them over their reserve value")); } if self.next_remote_htlc_id != msg.htlc_id { @@ -1778,12 +1881,6 @@ impl Channel { } } - if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 { - // This is a response to our post-monitor-failed unfreeze messages, so we can clear the - // monitor_pending_order requirement as we won't re-send the monitor_pending messages. - self.monitor_pending_order = None; - } - self.channel_monitor.provide_latest_local_commitment_tx_info(local_commitment_tx.0, local_keys, self.feerate_per_kw, htlcs_and_sigs); for htlc in self.pending_inbound_htlcs.iter_mut() { @@ -1806,14 +1903,13 @@ impl Channel { self.cur_local_commitment_transaction_number -= 1; self.last_local_commitment_txn = new_local_commitment_txn; - self.received_commitment_while_awaiting_raa = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) != 0; + // Note that if we need_our_commitment & !AwaitingRemoteRevoke we'll call + // send_commitment_no_status_check() next which will reset this to RAAFirst. + self.resend_order = RAACommitmentOrder::CommitmentFirst; if (self.channel_state & ChannelState::MonitorUpdateFailed as u32) != 0 { // In case we initially failed monitor updating without requiring a response, we need // to make sure the RAA gets sent first. - if !self.monitor_pending_commitment_signed { - self.monitor_pending_order = Some(RAACommitmentOrder::RevokeAndACKFirst); - } self.monitor_pending_revoke_and_ack = true; if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { // If we were going to send a commitment_signed after the RAA, go ahead and do all @@ -1987,12 +2083,6 @@ impl Channel { self.their_prev_commitment_point = self.their_cur_commitment_point; self.their_cur_commitment_point = Some(msg.next_per_commitment_point); self.cur_remote_commitment_transaction_number -= 1; - self.received_commitment_while_awaiting_raa = false; - if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 { - // This is a response to our post-monitor-failed unfreeze messages, so we can clear the - // monitor_pending_order requirement as we won't re-send the monitor_pending messages. - self.monitor_pending_order = None; - } log_trace!(self, "Updating HTLCs on receipt of RAA..."); let mut to_forward_infos = Vec::new(); @@ -2110,7 +2200,7 @@ impl Channel { // When the monitor updating is restored we'll call get_last_commitment_update(), // which does not update state, but we're definitely now awaiting a remote revoke // before we can step forward any more, so set it here. 
- self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32; + self.send_commitment_no_status_check()?; } self.monitor_pending_forwards.append(&mut to_forward_infos); self.monitor_pending_failures.append(&mut revoked_htlcs); @@ -2258,15 +2348,13 @@ impl Channel { /// Indicates that a ChannelMonitor update failed to be stored by the client and further /// updates are partially paused. /// This must be called immediately after the call which generated the ChannelMonitor update - /// which failed, with the order argument set to the type of call it represented (ie a - /// commitment update or a revoke_and_ack generation). The messages which were generated from - /// that original call must *not* have been sent to the remote end, and must instead have been - /// dropped. They will be regenerated when monitor_updating_restored is called. - pub fn monitor_update_failed(&mut self, order: RAACommitmentOrder, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) { + /// which failed. The messages which were generated from that call which generated the + /// monitor update failure must *not* have been sent to the remote end, and must instead + /// have been dropped. They will be regenerated when monitor_updating_restored is called. + pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) { assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0); self.monitor_pending_revoke_and_ack = resend_raa; self.monitor_pending_commitment_signed = resend_commitment; - self.monitor_pending_order = Some(order); assert!(self.monitor_pending_forwards.is_empty()); mem::swap(&mut pending_forwards, &mut self.monitor_pending_forwards); assert!(self.monitor_pending_failures.is_empty()); @@ -2277,20 +2365,38 @@ impl Channel { /// Indicates that the latest ChannelMonitor update has been committed by the client /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. - pub fn monitor_updating_restored(&mut self) -> (Option, Option, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) { + pub fn monitor_updating_restored(&mut self) -> (Option, Option, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option) { assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32); self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32); + let needs_broadcast_safe = self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.channel_outbound; + + // Because we will never generate a FundingBroadcastSafe event when we're in + // MonitorUpdateFailed, if we assume the user only broadcast the funding transaction when + // they received the FundingBroadcastSafe event, we can only ever hit + // monitor_pending_funding_locked when we're an inbound channel which failed to persist the + // monitor on funding_created, and we even got the funding transaction confirmed before the + // monitor was persisted. 
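// Illustrative sketch, not part of this patch: how a caller might consume the widened
// monitor_updating_restored() return value. The enums and the send callback are
// placeholders, not the real ChannelManager plumbing.
enum OutboundMsg { FundingLocked, RevokeAndACK, CommitmentSigned }
enum ResendOrder { CommitmentFirst, RevokeAndACKFirst } // mirrors RAACommitmentOrder

fn handle_monitor_restored(order: ResendOrder, raa: Option<OutboundMsg>,
                           commitment: Option<OutboundMsg>, funding_locked: Option<OutboundMsg>,
                           needs_broadcast_safe: bool, mut send: impl FnMut(OutboundMsg)) {
    if needs_broadcast_safe {
        // Only now surface a FundingBroadcastSafe-style event: it was withheld while
        // the monitor update for funding_signed was still failing.
    }
    // A buffered funding_locked (monitor_pending_funding_locked) goes out before the
    // CS/RAA pair here.
    if let Some(msg) = funding_locked { send(msg); }
    // The CS/RAA pair must be resent in the order it was originally generated, which is
    // exactly what the new resend_order field records.
    match order {
        ResendOrder::RevokeAndACKFirst => {
            if let Some(msg) = raa { send(msg); }
            if let Some(msg) = commitment { send(msg); }
        },
        ResendOrder::CommitmentFirst => {
            if let Some(msg) = commitment { send(msg); }
            if let Some(msg) = raa { send(msg); }
        },
    }
}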
+ let funding_locked = if self.monitor_pending_funding_locked { + assert!(!self.channel_outbound, "Funding transaction broadcast without FundingBroadcastSafe!"); + self.monitor_pending_funding_locked = false; + let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number); + let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret); + Some(msgs::FundingLocked { + channel_id: self.channel_id(), + next_per_commitment_point: next_per_commitment_point, + }) + } else { None }; + let mut forwards = Vec::new(); mem::swap(&mut forwards, &mut self.monitor_pending_forwards); let mut failures = Vec::new(); mem::swap(&mut failures, &mut self.monitor_pending_failures); if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 { - // Leave monitor_pending_order so we can order our channel_reestablish responses self.monitor_pending_revoke_and_ack = false; self.monitor_pending_commitment_signed = false; - return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures); + return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, needs_broadcast_safe, funding_locked); } let raa = if self.monitor_pending_revoke_and_ack { @@ -2302,7 +2408,13 @@ impl Channel { self.monitor_pending_revoke_and_ack = false; self.monitor_pending_commitment_signed = false; - (raa, commitment_update, self.monitor_pending_order.clone().unwrap(), forwards, failures) + let order = self.resend_order.clone(); + log_trace!(self, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first", + if needs_broadcast_safe { "a funding broadcast safe, " } else { "" }, + if commitment_update.is_some() { "a" } else { "no" }, + if raa.is_some() { "an" } else { "no" }, + match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); + (raa, commitment_update, order, forwards, failures, needs_broadcast_safe, funding_locked) } pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), ChannelError> { @@ -2380,7 +2492,7 @@ impl Channel { update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, - update_fee: None, //TODO: We need to support re-generating any update_fees in the last commitment_signed! 
+ update_fee: None, commitment_signed: self.send_commitment_no_state_update().expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0, } } @@ -2400,6 +2512,22 @@ impl Channel { return Err(ChannelError::Close("Peer sent a garbage channel_reestablish")); } + if msg.next_remote_commitment_number > 0 { + match msg.data_loss_protect { + OptionalField::Present(ref data_loss) => { + if chan_utils::build_commitment_secret(self.local_keys.commitment_seed, INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1) != data_loss.your_last_per_commitment_secret { + return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided")); + } + if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number { + self.channel_monitor.provide_rescue_remote_commitment_tx_info(data_loss.my_current_per_commitment_point); + return Err(ChannelError::CloseDelayBroadcast { msg: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting", update: Some(self.channel_monitor.clone()) + }); + } + }, + OptionalField::Absent => {} + } + } + // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all // remaining cases either succeed or ErrorMessage-fail). self.channel_state &= !(ChannelState::PeerDisconnected as u32); @@ -2412,7 +2540,9 @@ impl Channel { } else { None }; if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 { - if self.channel_state & ChannelState::OurFundingLocked as u32 == 0 { + // If we're waiting on a monitor update, we shouldn't re-send any funding_locked's. + if self.channel_state & (ChannelState::OurFundingLocked as u32) == 0 || + self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 { if msg.next_remote_commitment_number != 0 { return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet")); } @@ -2460,12 +2590,6 @@ impl Channel { }) } else { None }; - let order = self.monitor_pending_order.clone().unwrap_or(if self.received_commitment_while_awaiting_raa { - RAACommitmentOrder::CommitmentFirst - } else { - RAACommitmentOrder::RevokeAndACKFirst - }); - if msg.next_local_commitment_number == our_next_remote_commitment_number { if required_revoke.is_some() { log_debug!(self, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id())); @@ -2473,20 +2597,19 @@ impl Channel { log_debug!(self, "Reconnected channel {} with no loss", log_bytes!(self.channel_id())); } - if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 && - self.monitor_pending_order.is_none() { // monitor_pending_order indicates we're waiting on a response to a unfreeze + if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 { // We're up-to-date and not waiting on a remote revoke (if we are our // channel_reestablish should result in them sending a revoke_and_ack), but we may // have received some updates while we were disconnected. Free the holding cell // now! 
match self.free_holding_cell_htlcs() { Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)), - Err(ChannelError::Ignore(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"), - Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), order, shutdown_msg)), - Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg)), + Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast { .. }) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"), + Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)), + Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)), } } else { - return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg)); + return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)); } } else if msg.next_local_commitment_number == our_next_remote_commitment_number - 1 { if required_revoke.is_some() { @@ -2497,10 +2620,10 @@ impl Channel { if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 { self.monitor_pending_commitment_signed = true; - return Ok((resend_funding_locked, None, None, None, order, shutdown_msg)); + return Ok((resend_funding_locked, None, None, None, self.resend_order.clone(), shutdown_msg)); } - return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, order, shutdown_msg)); + return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, self.resend_order.clone(), shutdown_msg)); } else { return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction")); } @@ -2661,7 +2784,7 @@ impl Channel { } } - let proposed_sat_per_kw = msg.fee_satoshis * 1000 / closing_tx.get_weight(); + let proposed_sat_per_kw = msg.fee_satoshis * 1000 / closing_tx.get_weight() as u64; if self.channel_outbound { let our_max_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); if proposed_sat_per_kw > our_max_feerate { @@ -2768,7 +2891,6 @@ impl Channel { self.cur_remote_commitment_transaction_number + 2 } - //TODO: Testing purpose only, should be changed in another way after #81 #[cfg(test)] pub fn get_local_keys(&self) -> &ChannelKeys { &self.local_keys @@ -2910,12 +3032,17 @@ impl Channel { //they can by sending two revoke_and_acks back-to-back, but not really). This appears to be //a protocol oversight, but I assume I'm just missing something. 
if need_commitment_update { - let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number); - let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret); - return Ok(Some(msgs::FundingLocked { - channel_id: self.channel_id, - next_per_commitment_point: next_per_commitment_point, - })); + if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 { + let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number); + let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret); + return Ok(Some(msgs::FundingLocked { + channel_id: self.channel_id, + next_per_commitment_point: next_per_commitment_point, + })); + } else { + self.monitor_pending_funding_locked = true; + return Ok(None); + } } } } @@ -3008,7 +3135,7 @@ impl Channel { channel_reserve_satoshis: Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis), htlc_minimum_msat: self.our_htlc_minimum_msat, feerate_per_kw: fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background) as u32, - to_self_delay: BREAKDOWN_TIMEOUT, + to_self_delay: self.our_to_self_delay, max_accepted_htlcs: OUR_MAX_HTLCS, funding_pubkey: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.funding_key), revocation_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.revocation_base_key), @@ -3017,7 +3144,7 @@ impl Channel { htlc_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.htlc_base_key), first_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &local_commitment_secret), channel_flags: if self.config.announced_channel {1} else {0}, - shutdown_scriptpubkey: OptionalField::Absent + shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() }) } } @@ -3041,7 +3168,7 @@ impl Channel { channel_reserve_satoshis: Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis), htlc_minimum_msat: self.our_htlc_minimum_msat, minimum_depth: self.minimum_depth, - to_self_delay: BREAKDOWN_TIMEOUT, + to_self_delay: self.our_to_self_delay, max_accepted_htlcs: OUR_MAX_HTLCS, funding_pubkey: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.funding_key), revocation_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.revocation_base_key), @@ -3049,7 +3176,7 @@ impl Channel { delayed_payment_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.delayed_payment_base_key), htlc_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.htlc_base_key), first_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &local_commitment_secret), - shutdown_scriptpubkey: OptionalField::Absent + shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() }) } } @@ -3157,6 +3284,20 @@ impl Channel { pub fn get_channel_reestablish(&self) -> msgs::ChannelReestablish { assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32); assert_ne!(self.cur_remote_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); + let data_loss_protect = if self.cur_remote_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { + let remote_last_secret = 
self.channel_monitor.get_secret(self.cur_remote_commitment_transaction_number + 2).unwrap(); + log_trace!(self, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret)); + OptionalField::Present(DataLossProtect { + your_last_per_commitment_secret: remote_last_secret, + my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number + 1)) + }) + } else { + log_debug!(self, "We haven't yet seen any revoked secret; if this channel has already been updated it means we have fallen behind, and you should wait for the other peer to close"); + OptionalField::Present(DataLossProtect { + your_last_per_commitment_secret: [0;32], + my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number)) + }) + }; msgs::ChannelReestablish { channel_id: self.channel_id(), // The protocol has two different commitment number concepts - the "commitment // number", which starts at 0 and counts up, and the "revocation number" which we use. We @@ -3177,7 +3318,7 @@ impl Channel { // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't // overflow here. next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_remote_commitment_transaction_number - 1, - data_loss_protect: OptionalField::Absent, + data_loss_protect, } } @@ -3217,16 +3358,15 @@ impl Channel { if outbound_htlc_count + 1 > self.their_max_accepted_htlcs as u32 { return Err(ChannelError::Ignore("Cannot push more than their max accepted HTLCs")); } - //TODO: Spec is unclear if this is per-direction or in total (I assume per direction): // Check their_max_htlc_value_in_flight_msat if htlc_outbound_value_msat + amount_msat > self.their_max_htlc_value_in_flight_msat { - return Err(ChannelError::Ignore("Cannot send value that would put us over the max HTLC value in flight")); + return Err(ChannelError::Ignore("Cannot send value that would put us over the max HTLC value in flight our peer will accept")); } // Check self.their_channel_reserve_satoshis (the amount we must keep as // reserve for them to have something to claim if we misbehave) if self.value_to_self_msat < self.their_channel_reserve_satoshis * 1000 + amount_msat + htlc_outbound_value_msat { - return Err(ChannelError::Ignore("Cannot send value that would put us over the reserve value")); + return Err(ChannelError::Ignore("Cannot send value that would put us over their reserve value")); } //TODO: Check cltv_expiry? Do this in channel manager? 
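// Illustrative sketch, not part of this patch: the per-commitment secret derivation the
// data_loss_protect checks above rely on (chan_utils::build_commitment_secret in this
// codebase). The sha2 crate is an assumed stand-in for the SHA-256 used there.
use sha2::{Digest, Sha256};

fn build_commitment_secret(commitment_seed: [u8; 32], idx: u64) -> [u8; 32] {
    // BOLT 3: for each set bit of the 48-bit index, MSB first, flip that bit of the
    // running value and hash it.
    let mut res = commitment_seed;
    for i in 0..48u8 {
        let bitpos = 47 - i;
        if idx & (1u64 << bitpos) != 0 {
            res[(bitpos / 8) as usize] ^= 1u8 << (bitpos & 7);
            res = Sha256::digest(res).into();
        }
    }
    res
}
// On receiving channel_reestablish, the handler above recomputes the secret it released
// at INITIAL_COMMITMENT_NUMBER - next_remote_commitment_number + 1 and compares it with
// your_last_per_commitment_secret; a mismatch is treated as garbage, while a
// next_remote_commitment_number ahead of our own state means we have fallen behind and
// must not broadcast our stale commitment (the new ChannelError::CloseDelayBroadcast).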
@@ -3239,7 +3379,6 @@ impl Channel { cltv_expiry: cltv_expiry, source, onion_routing_packet: onion_routing_packet, - time_created: Instant::now(), }); return Ok(None); } @@ -3321,6 +3460,7 @@ impl Channel { htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(fail_reason); } } + self.resend_order = RAACommitmentOrder::RevokeAndACKFirst; let (res, remote_commitment_tx, htlcs) = match self.send_commitment_no_state_update() { Ok((res, (remote_commitment_tx, mut htlcs))) => { @@ -3531,8 +3671,6 @@ impl Writeable for Channel { self.cur_remote_commitment_transaction_number.write(writer)?; self.value_to_self_msat.write(writer)?; - self.received_commitment_while_awaiting_raa.write(writer)?; - let mut dropped_inbound_htlcs = 0; for htlc in self.pending_inbound_htlcs.iter() { if let InboundHTLCState::RemoteAnnounced(_) = htlc.state { @@ -3610,14 +3748,13 @@ impl Writeable for Channel { (self.holding_cell_htlc_updates.len() as u64).write(writer)?; for update in self.holding_cell_htlc_updates.iter() { match update { - &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, time_created: _ } => { + &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => { 0u8.write(writer)?; amount_msat.write(writer)?; cltv_expiry.write(writer)?; payment_hash.write(writer)?; source.write(writer)?; onion_routing_packet.write(writer)?; - // time_created is not serialized - we re-init the timeout upon deserialization }, &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => { 1u8.write(writer)?; @@ -3632,13 +3769,14 @@ impl Writeable for Channel { } } + match self.resend_order { + RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?, + RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?, + } + + self.monitor_pending_funding_locked.write(writer)?; self.monitor_pending_revoke_and_ack.write(writer)?; self.monitor_pending_commitment_signed.write(writer)?; - match self.monitor_pending_order { - None => 0u8.write(writer)?, - Some(RAACommitmentOrder::CommitmentFirst) => 1u8.write(writer)?, - Some(RAACommitmentOrder::RevokeAndACKFirst) => 2u8.write(writer)?, - } (self.monitor_pending_forwards.len() as u64).write(writer)?; for &(ref pending_forward, ref htlc_id) in self.monitor_pending_forwards.iter() { @@ -3693,6 +3831,7 @@ impl Writeable for Channel { self.their_htlc_minimum_msat.write(writer)?; self.our_htlc_minimum_msat.write(writer)?; self.their_to_self_delay.write(writer)?; + self.our_to_self_delay.write(writer)?; self.their_max_accepted_htlcs.write(writer)?; self.minimum_depth.write(writer)?; @@ -3736,8 +3875,6 @@ impl ReadableArgs> for Channel { let cur_remote_commitment_transaction_number = Readable::read(reader)?; let value_to_self_msat = Readable::read(reader)?; - let received_commitment_while_awaiting_raa = Readable::read(reader)?; - let pending_inbound_htlc_count: u64 = Readable::read(reader)?; let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, OUR_MAX_HTLCS as usize)); for _ in 0..pending_inbound_htlc_count { @@ -3786,7 +3923,6 @@ impl ReadableArgs> for Channel { payment_hash: Readable::read(reader)?, source: Readable::read(reader)?, onion_routing_packet: Readable::read(reader)?, - time_created: Instant::now(), }, 1 => HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: Readable::read(reader)?, @@ -3800,16 +3936,16 @@ impl ReadableArgs> for Channel { }); } - let monitor_pending_revoke_and_ack = 
Readable::read(reader)?; - let monitor_pending_commitment_signed = Readable::read(reader)?; - - let monitor_pending_order = match >::read(reader)? { - 0 => None, - 1 => Some(RAACommitmentOrder::CommitmentFirst), - 2 => Some(RAACommitmentOrder::RevokeAndACKFirst), + let resend_order = match >::read(reader)? { + 0 => RAACommitmentOrder::CommitmentFirst, + 1 => RAACommitmentOrder::RevokeAndACKFirst, _ => return Err(DecodeError::InvalidValue), }; + let monitor_pending_funding_locked = Readable::read(reader)?; + let monitor_pending_revoke_and_ack = Readable::read(reader)?; + let monitor_pending_commitment_signed = Readable::read(reader)?; + let monitor_pending_forwards_count: u64 = Readable::read(reader)?; let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, OUR_MAX_HTLCS as usize)); for _ in 0..monitor_pending_forwards_count { @@ -3858,6 +3994,7 @@ impl ReadableArgs> for Channel { let their_htlc_minimum_msat = Readable::read(reader)?; let our_htlc_minimum_msat = Readable::read(reader)?; let their_to_self_delay = Readable::read(reader)?; + let our_to_self_delay = Readable::read(reader)?; let their_max_accepted_htlcs = Readable::read(reader)?; let minimum_depth = Readable::read(reader)?; @@ -3896,14 +4033,15 @@ impl ReadableArgs> for Channel { cur_remote_commitment_transaction_number, value_to_self_msat, - received_commitment_while_awaiting_raa, pending_inbound_htlcs, pending_outbound_htlcs, holding_cell_htlc_updates, + resend_order, + + monitor_pending_funding_locked, monitor_pending_revoke_and_ack, monitor_pending_commitment_signed, - monitor_pending_order, monitor_pending_forwards, monitor_pending_failures, @@ -3935,6 +4073,7 @@ impl ReadableArgs> for Channel { their_htlc_minimum_msat, our_htlc_minimum_msat, their_to_self_delay, + our_to_self_delay, their_max_accepted_htlcs, minimum_depth,
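// Illustrative sketch, not part of this patch: the upfront_shutdown_script acceptance
// rule that new_from_req() and accept_channel() above each inline, factored into one
// helper. It assumes the same rust-bitcoin Script predicates those call sites use; the
// Option parameter is a stand-in for msgs::OptionalField.
use bitcoin::blockdata::script::Script;

fn accept_upfront_shutdown_script(peer_supports_upfront: bool, script: Option<&Script>)
        -> Result<Option<Script>, &'static str> {
    if !peer_supports_upfront { return Ok(None); }
    match script {
        // An accepted format: remember it and enforce it when their shutdown arrives.
        Some(s) if s.is_p2pkh() || s.is_p2sh() || s.is_v0_p2wpkh() || s.is_v0_p2wsh() => Ok(Some(s.clone())),
        // A 0-length script is the defined way to opt out.
        Some(s) if s.len() == 0 => Ok(None),
        Some(_) => Err("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format"),
        None => Err("Peer is signaling upfront_shutdown but did not provide a script; a 0-length script is required to opt out"),
    }
}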