use secp256k1;
use ln::msgs;
-use ln::msgs::{DecodeError, OptionalField};
+use ln::msgs::{DecodeError, OptionalField, LocalFeatures};
use ln::channelmonitor::ChannelMonitor;
use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash};
use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment,HTLC_SUCCESS_TX_WEIGHT,HTLC_TIMEOUT_TX_WEIGHT};
use std;
use std::default::Default;
use std::{cmp,mem};
-use std::time::Instant;
use std::sync::{Arc};
#[cfg(test)]
/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
- AddHTLC {
+ AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
// always outbound
amount_msat: u64,
cltv_expiry: u32,
payment_hash: PaymentHash,
source: HTLCSource,
onion_routing_packet: msgs::OnionPacket,
- time_created: Instant, //TODO: Some kind of timeout thing-a-majig
},
ClaimHTLC {
payment_preimage: PaymentPreimage,
cur_local_commitment_transaction_number: u64,
cur_remote_commitment_transaction_number: u64,
value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
- /// Upon receipt of a channel_reestablish we have to figure out whether to send a
- /// revoke_and_ack first or a commitment update first. Generally, we prefer to send
- /// revoke_and_ack first, but if we had a pending commitment update of our own waiting on a
- /// remote revoke when we received the latest commitment update from the remote we have to make
- /// sure that commitment update gets resent first.
- received_commitment_while_awaiting_raa: bool,
pending_inbound_htlcs: Vec<InboundHTLCOutput>,
pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
+ /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
+ /// need to ensure we resend them in the order we originally generated them. Note that because
+ /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
+ /// sufficient to simply set this to the opposite of any message we are generating as we
+ /// generate it: when we generate a CS, we set this to RAAFirst since, if there is a pending
+ /// in-flight RAA to resend, it will have been the first thing we generated, and thus we
+ /// should send it first.
+ resend_order: RAACommitmentOrder,
+
monitor_pending_revoke_and_ack: bool,
monitor_pending_commitment_signed: bool,
- monitor_pending_order: Option<RAACommitmentOrder>,
monitor_pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>,
monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1000 // TODO
}
- fn derive_minimum_depth(_channel_value_satoshis_msat: u64, _value_to_self_msat: u64) -> u32 {
- // Note that in order to comply with BOLT 7 announcement_signatures requirements this must
- // be at least 6.
- const CONF_TARGET: u32 = 12; //TODO: Should be much higher
- CONF_TARGET
- }
-
// Constructors:
pub fn new_outbound(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface>, their_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel, APIError> {
let chan_keys = keys_provider.get_channel_keys(false);
cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
value_to_self_msat: channel_value_satoshis * 1000 - push_msat,
- received_commitment_while_awaiting_raa: false,
pending_inbound_htlcs: Vec::new(),
pending_outbound_htlcs: Vec::new(),
next_remote_htlc_id: 0,
channel_update_count: 1,
+ resend_order: RAACommitmentOrder::CommitmentFirst,
+
monitor_pending_revoke_and_ack: false,
monitor_pending_commitment_signed: false,
- monitor_pending_order: None,
monitor_pending_forwards: Vec::new(),
monitor_pending_failures: Vec::new(),
/// Creates a new channel from a remote sides' request for one.
/// Assumes chain_hash has already been checked and corresponds with what we expect!
- pub fn new_from_req(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface>, their_node_id: PublicKey, msg: &msgs::OpenChannel, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel, ChannelError> {
+ pub fn new_from_req(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface>, their_node_id: PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel, ChannelError> {
let chan_keys = keys_provider.get_channel_keys(true);
let mut local_config = (*config).channel_options.clone();
}
// Now check against optional parameters as set by config...
- if msg.funding_satoshis < config.channel_limits.min_funding_satoshis {
+ if msg.funding_satoshis < config.peer_channel_config_limits.min_funding_satoshis {
return Err(ChannelError::Close("funding satoshis is less than the user specified limit"));
}
- if msg.htlc_minimum_msat > config.channel_limits.max_htlc_minimum_msat {
+ if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit"));
}
- if msg.max_htlc_value_in_flight_msat < config.channel_limits.min_max_htlc_value_in_flight_msat {
+ if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit"));
}
- if msg.channel_reserve_satoshis > config.channel_limits.max_channel_reserve_satoshis {
+ if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit"));
}
- if msg.max_accepted_htlcs < config.channel_limits.min_max_accepted_htlcs {
+ if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit"));
}
- if msg.dust_limit_satoshis < config.channel_limits.min_dust_limit_satoshis {
+ if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis {
return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit"));
}
- if msg.dust_limit_satoshis > config.channel_limits.max_dust_limit_satoshis {
+ if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis {
return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit"));
}
// Convert things into internal flags and prep our state:
let their_announce = if (msg.channel_flags & 1) == 1 { true } else { false };
- if config.channel_limits.force_announced_channel_preference {
+ if config.peer_channel_config_limits.force_announced_channel_preference {
if local_config.announced_channel != their_announce {
return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours"));
}
channel_monitor.set_their_base_keys(&msg.htlc_basepoint, &msg.delayed_payment_basepoint);
channel_monitor.set_their_to_self_delay(msg.to_self_delay);
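+ // Per BOLT 2, a peer signaling upfront_shutdown must include a shutdown_scriptpubkey,
+ // sending a zero-length script to opt out of committing to one.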
+ let their_shutdown_scriptpubkey = if their_local_features.supports_upfront_shutdown_script() {
+ match &msg.shutdown_scriptpubkey {
+ &OptionalField::Present(ref script) => {
+ // Peer is signaling upfront_shutdown and has provided an accepted scriptpubkey format. We enforce it while receiving the shutdown msg
+ if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wsh() || script.is_v0_p2wpkh() {
+ Some(script.clone())
+ // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+ } else if script.len() == 0 {
+ None
+ // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
+ } else {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format"));
+ }
+ },
+ // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
+ &OptionalField::Absent => {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out"));
+ }
+ }
+ } else { None };
+
let mut chan = Channel {
user_id: user_id,
config: local_config,
cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
value_to_self_msat: msg.push_msat,
- received_commitment_while_awaiting_raa: false,
pending_inbound_htlcs: Vec::new(),
pending_outbound_htlcs: Vec::new(),
next_remote_htlc_id: 0,
channel_update_count: 1,
+ resend_order: RAACommitmentOrder::CommitmentFirst,
+
monitor_pending_revoke_and_ack: false,
monitor_pending_commitment_signed: false,
- monitor_pending_order: None,
monitor_pending_forwards: Vec::new(),
monitor_pending_failures: Vec::new(),
our_htlc_minimum_msat: Channel::derive_our_htlc_minimum_msat(msg.feerate_per_kw as u64),
their_to_self_delay: msg.to_self_delay,
their_max_accepted_htlcs: msg.max_accepted_htlcs,
- minimum_depth: Channel::derive_minimum_depth(msg.funding_satoshis*1000, msg.push_msat),
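+ // Note that to comply with BOLT 7 announcement_signatures requirements this must be at least 6.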
+ minimum_depth: config.own_channel_config.minimum_depth,
their_funding_pubkey: Some(msg.funding_pubkey),
their_revocation_basepoint: Some(msg.revocation_basepoint),
their_prev_commitment_point: None,
their_node_id: their_node_id,
- their_shutdown_scriptpubkey: None,
+ their_shutdown_scriptpubkey,
channel_monitor: channel_monitor,
let value_to_b = if local { value_to_remote } else { value_to_self };
if value_to_a >= (dust_limit_satoshis as i64) {
+ log_trace!(self, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
txouts.push((TxOut {
script_pubkey: chan_utils::get_revokeable_redeemscript(&keys.revocation_key,
if local { self.their_to_self_delay } else { BREAKDOWN_TIMEOUT },
}
if value_to_b >= (dust_limit_satoshis as i64) {
+ log_trace!(self, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
txouts.push((TxOut {
script_pubkey: Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
.push_slice(&Hash160::hash(&keys.b_payment_key.serialize())[..])
// Message handlers:
- pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig) -> Result<(), ChannelError> {
+ pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_local_features: LocalFeatures) -> Result<(), ChannelError> {
// Check sanity of message fields:
if !self.channel_outbound {
return Err(ChannelError::Close("Got an accept_channel message from an inbound peer"));
}
// Now check against optional parameters as set by config...
- if msg.htlc_minimum_msat > config.channel_limits.max_htlc_minimum_msat {
+ if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit"));
}
- if msg.max_htlc_value_in_flight_msat < config.channel_limits.min_max_htlc_value_in_flight_msat {
+ if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit"));
}
- if msg.channel_reserve_satoshis > config.channel_limits.max_channel_reserve_satoshis {
+ if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit"));
}
- if msg.max_accepted_htlcs < config.channel_limits.min_max_accepted_htlcs {
+ if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit"));
}
- if msg.dust_limit_satoshis < config.channel_limits.min_dust_limit_satoshis {
+ if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis {
return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit"));
}
- if msg.dust_limit_satoshis > config.channel_limits.max_dust_limit_satoshis {
+ if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis {
return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit"));
}
- if msg.minimum_depth > config.channel_limits.max_minimum_depth {
+ if msg.minimum_depth > config.peer_channel_config_limits.max_minimum_depth {
return Err(ChannelError::Close("We consider the minimum depth to be unreasonably large"));
}
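+ // As in new_from_req: per BOLT 2, a peer signaling upfront_shutdown must include a
+ // shutdown_scriptpubkey, sending a zero-length script to opt out of committing to one.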
+ let their_shutdown_scriptpubkey = if their_local_features.supports_upfront_shutdown_script() {
+ match &msg.shutdown_scriptpubkey {
+ &OptionalField::Present(ref script) => {
+ // Peer is signaling upfront_shutdown and has provided an accepted scriptpubkey format. We enforce it while receiving the shutdown msg
+ if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wsh() || script.is_v0_p2wpkh() {
+ Some(script.clone())
+ // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+ } else if script.len() == 0 {
+ None
+ // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
+ } else {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format"));
+ }
+ },
+ // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
+ &OptionalField::Absent => {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out"));
+ }
+ }
+ } else { None };
+
self.channel_monitor.set_their_base_keys(&msg.htlc_basepoint, &msg.delayed_payment_basepoint);
self.their_dust_limit_satoshis = msg.dust_limit_satoshis;
self.their_delayed_payment_basepoint = Some(msg.delayed_payment_basepoint);
self.their_htlc_basepoint = Some(msg.htlc_basepoint);
self.their_cur_commitment_point = Some(msg.first_per_commitment_point);
+ self.their_shutdown_scriptpubkey = their_shutdown_scriptpubkey;
let obscure_factor = self.get_commitment_transaction_number_obscure_factor();
self.channel_monitor.set_commitment_obscure_factor(obscure_factor);
(htlc_outbound_count as u32, htlc_outbound_value_msat)
}
+ /// Get the available (i.e. not including pending HTLCs) inbound and outbound balance in msat.
+ /// Doesn't bother handling the
+ /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
+ /// corner case properly.
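+ /// For example, on a 1_000_000-satoshi channel where value_to_self_msat is 600_000_000 and
+ /// no HTLCs are pending, this returns (400_000_000, 600_000_000).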
+ pub fn get_inbound_outbound_available_balance_msat(&self) -> (u64, u64) {
+ // Note that we have to handle overflow due to the above case.
+ (cmp::max(self.channel_value_satoshis as i64 * 1000 - self.value_to_self_msat as i64 - self.get_inbound_pending_htlc_stats().1 as i64, 0) as u64,
+ cmp::max(self.value_to_self_msat as i64 - self.get_outbound_pending_htlc_stats().1 as i64, 0) as u64)
+ }
+
pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_state: PendingHTLCStatus) -> Result<(), ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state"));
}
}
- if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
- // This is a response to our post-monitor-failed unfreeze messages, so we can clear the
- // monitor_pending_order requirement as we won't re-send the monitor_pending messages.
- self.monitor_pending_order = None;
- }
-
self.channel_monitor.provide_latest_local_commitment_tx_info(local_commitment_tx.0, local_keys, self.feerate_per_kw, htlcs_and_sigs);
for htlc in self.pending_inbound_htlcs.iter_mut() {
self.cur_local_commitment_transaction_number -= 1;
self.last_local_commitment_txn = new_local_commitment_txn;
- self.received_commitment_while_awaiting_raa = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) != 0;
+ // Note that if we need_our_commitment & !AwaitingRemoteRevoke we'll call
+ // send_commitment_no_status_check() next which will reset this to RAAFirst.
+ self.resend_order = RAACommitmentOrder::CommitmentFirst;
if (self.channel_state & ChannelState::MonitorUpdateFailed as u32) != 0 {
// In case we initially failed monitor updating without requiring a response, we need
// to make sure the RAA gets sent first.
- if !self.monitor_pending_commitment_signed {
- self.monitor_pending_order = Some(RAACommitmentOrder::RevokeAndACKFirst);
- }
self.monitor_pending_revoke_and_ack = true;
if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
// If we were going to send a commitment_signed after the RAA, go ahead and do all
self.their_prev_commitment_point = self.their_cur_commitment_point;
self.their_cur_commitment_point = Some(msg.next_per_commitment_point);
self.cur_remote_commitment_transaction_number -= 1;
- self.received_commitment_while_awaiting_raa = false;
- if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
- // This is a response to our post-monitor-failed unfreeze messages, so we can clear the
- // monitor_pending_order requirement as we won't re-send the monitor_pending messages.
- self.monitor_pending_order = None;
- }
log_trace!(self, "Updating HTLCs on receipt of RAA...");
let mut to_forward_infos = Vec::new();
// When the monitor updating is restored we'll call get_last_commitment_update(),
// which does not update state, but we're definitely now awaiting a remote revoke
// before we can step forward any more, so set it here.
- self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
+ self.send_commitment_no_status_check()?;
}
self.monitor_pending_forwards.append(&mut to_forward_infos);
self.monitor_pending_failures.append(&mut revoked_htlcs);
/// Indicates that a ChannelMonitor update failed to be stored by the client and further
/// updates are partially paused.
/// This must be called immediately after the call which generated the ChannelMonitor update
- /// which failed, with the order argument set to the type of call it represented (ie a
- /// commitment update or a revoke_and_ack generation). The messages which were generated from
- /// that original call must *not* have been sent to the remote end, and must instead have been
- /// dropped. They will be regenerated when monitor_updating_restored is called.
- pub fn monitor_update_failed(&mut self, order: RAACommitmentOrder, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
+ /// which failed. The messages which were generated by that call must *not* have been sent
+ /// to the remote end; they must instead have been dropped. They will be regenerated when
+ /// monitor_updating_restored is called.
+ pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
self.monitor_pending_revoke_and_ack = resend_raa;
self.monitor_pending_commitment_signed = resend_commitment;
- self.monitor_pending_order = Some(order);
assert!(self.monitor_pending_forwards.is_empty());
mem::swap(&mut pending_forwards, &mut self.monitor_pending_forwards);
assert!(self.monitor_pending_failures.is_empty());
mem::swap(&mut failures, &mut self.monitor_pending_failures);
if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
- // Leave monitor_pending_order so we can order our channel_reestablish responses
self.monitor_pending_revoke_and_ack = false;
self.monitor_pending_commitment_signed = false;
return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures);
self.monitor_pending_revoke_and_ack = false;
self.monitor_pending_commitment_signed = false;
- let order = self.monitor_pending_order.clone().unwrap();
+ let order = self.resend_order.clone();
log_trace!(self, "Restored monitor updating resulting in {} commitment update and {} RAA, with {} first",
if commitment_update.is_some() { "a" } else { "no" },
if raa.is_some() { "an" } else { "no" },
})
} else { None };
- let order = self.monitor_pending_order.clone().unwrap_or(if self.received_commitment_while_awaiting_raa {
- RAACommitmentOrder::CommitmentFirst
- } else {
- RAACommitmentOrder::RevokeAndACKFirst
- });
-
if msg.next_local_commitment_number == our_next_remote_commitment_number {
if required_revoke.is_some() {
log_debug!(self, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id()));
log_debug!(self, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
}
- if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 &&
- self.monitor_pending_order.is_none() { // monitor_pending_order indicates we're waiting on a response to a unfreeze
+ if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 {
// We're up-to-date and not waiting on a remote revoke (if we are our
// channel_reestablish should result in them sending a revoke_and_ack), but we may
// have received some updates while we were disconnected. Free the holding cell
match self.free_holding_cell_htlcs() {
Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
Err(ChannelError::Ignore(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
- Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), order, shutdown_msg)),
- Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg)),
+ Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)),
+ Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)),
}
} else {
- return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg));
+ return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg));
}
} else if msg.next_local_commitment_number == our_next_remote_commitment_number - 1 {
if required_revoke.is_some() {
if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
self.monitor_pending_commitment_signed = true;
- return Ok((resend_funding_locked, None, None, None, order, shutdown_msg));
+ return Ok((resend_funding_locked, None, None, None, self.resend_order.clone(), shutdown_msg));
}
- return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, order, shutdown_msg));
+ return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, self.resend_order.clone(), shutdown_msg));
} else {
return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction"));
}
htlc_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.htlc_base_key),
first_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &local_commitment_secret),
channel_flags: if self.config.announced_channel {1} else {0},
- shutdown_scriptpubkey: OptionalField::Absent
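+ // An empty script here signals support for upfront_shutdown without committing to a
+ // shutdown script (the BOLT 2 opt-out).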
+ shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() })
}
}
delayed_payment_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.delayed_payment_base_key),
htlc_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.htlc_base_key),
first_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &local_commitment_secret),
- shutdown_scriptpubkey: OptionalField::Absent
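+ // As above: an empty script signals support for upfront_shutdown without committing to a
+ // shutdown script (the BOLT 2 opt-out).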
+ shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() })
}
}
cltv_expiry: cltv_expiry,
source,
onion_routing_packet: onion_routing_packet,
- time_created: Instant::now(),
});
return Ok(None);
}
htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(fail_reason);
}
}
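+ // We are about to generate a commitment_signed, so per the resend_order rules any
+ // in-flight RAA must be resent before it on reconnect or monitor restoration.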
+ self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
let (res, remote_commitment_tx, htlcs) = match self.send_commitment_no_state_update() {
Ok((res, (remote_commitment_tx, mut htlcs))) => {
self.cur_remote_commitment_transaction_number.write(writer)?;
self.value_to_self_msat.write(writer)?;
- self.received_commitment_while_awaiting_raa.write(writer)?;
-
let mut dropped_inbound_htlcs = 0;
for htlc in self.pending_inbound_htlcs.iter() {
if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
(self.holding_cell_htlc_updates.len() as u64).write(writer)?;
for update in self.holding_cell_htlc_updates.iter() {
match update {
- &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, time_created: _ } => {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
0u8.write(writer)?;
amount_msat.write(writer)?;
cltv_expiry.write(writer)?;
payment_hash.write(writer)?;
source.write(writer)?;
onion_routing_packet.write(writer)?;
- // time_created is not serialized - we re-init the timeout upon deserialization
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
1u8.write(writer)?;
}
}
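+ // Keep these discriminants in sync with the read side in the Readable impl below.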
+ match self.resend_order {
+ RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
+ RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
+ }
+
self.monitor_pending_revoke_and_ack.write(writer)?;
self.monitor_pending_commitment_signed.write(writer)?;
- match self.monitor_pending_order {
- None => 0u8.write(writer)?,
- Some(RAACommitmentOrder::CommitmentFirst) => 1u8.write(writer)?,
- Some(RAACommitmentOrder::RevokeAndACKFirst) => 2u8.write(writer)?,
- }
(self.monitor_pending_forwards.len() as u64).write(writer)?;
for &(ref pending_forward, ref htlc_id) in self.monitor_pending_forwards.iter() {
let cur_remote_commitment_transaction_number = Readable::read(reader)?;
let value_to_self_msat = Readable::read(reader)?;
- let received_commitment_while_awaiting_raa = Readable::read(reader)?;
-
let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, OUR_MAX_HTLCS as usize));
for _ in 0..pending_inbound_htlc_count {
payment_hash: Readable::read(reader)?,
source: Readable::read(reader)?,
onion_routing_packet: Readable::read(reader)?,
- time_created: Instant::now(),
},
1 => HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: Readable::read(reader)?,
});
}
- let monitor_pending_revoke_and_ack = Readable::read(reader)?;
- let monitor_pending_commitment_signed = Readable::read(reader)?;
-
- let monitor_pending_order = match <u8 as Readable<R>>::read(reader)? {
- 0 => None,
- 1 => Some(RAACommitmentOrder::CommitmentFirst),
- 2 => Some(RAACommitmentOrder::RevokeAndACKFirst),
+ let resend_order = match <u8 as Readable<R>>::read(reader)? {
+ 0 => RAACommitmentOrder::CommitmentFirst,
+ 1 => RAACommitmentOrder::RevokeAndACKFirst,
_ => return Err(DecodeError::InvalidValue),
};
+ let monitor_pending_revoke_and_ack = Readable::read(reader)?;
+ let monitor_pending_commitment_signed = Readable::read(reader)?;
+
let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, OUR_MAX_HTLCS as usize));
for _ in 0..monitor_pending_forwards_count {
cur_remote_commitment_transaction_number,
value_to_self_msat,
- received_commitment_while_awaiting_raa,
pending_inbound_htlcs,
pending_outbound_htlcs,
holding_cell_htlc_updates,
+ resend_order,
+
monitor_pending_revoke_and_ack,
monitor_pending_commitment_signed,
- monitor_pending_order,
monitor_pending_forwards,
monitor_pending_failures,