use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
-use bitcoin::util::hash::{BitcoinHash, Sha256dHash};
+use bitcoin::util::hash::BitcoinHash;
use bitcoin_hashes::{Hash, HashEngine};
use bitcoin_hashes::hmac::{Hmac, HmacEngine};
use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
use bitcoin_hashes::cmp::fixed_time_eq;
use secp256k1::key::{SecretKey,PublicKey};
use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use ln::router::Route;
use ln::msgs;
+use ln::msgs::LocalFeatures;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
use chain::keysinterface::KeysInterface;
use std::io::Cursor;
use std::sync::{Arc, Mutex, MutexGuard, RwLock};
use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::{Instant,Duration};
+use std::time::Duration;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
pub(super) struct ChannelHolder {
pub(super) by_id: HashMap<[u8; 32], Channel>,
pub(super) short_to_id: HashMap<u64, [u8; 32]>,
- pub(super) next_forward: Instant,
/// short channel id -> forward infos. Key of 0 means payments received
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the existence of a channel with the short id here, nor the short
/// ids in the PendingForwardHTLCInfo!
pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
+ /// payment_hash -> Vec<(amount_received, htlc_source)> for tracking HTLCs that were destined
+ /// for us and can be failed or claimed by the user
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the channels given here actually existing anymore by the time you
/// go to read them!
- pub(super) claimable_htlcs: HashMap<PaymentHash, Vec<HTLCPreviousHopData>>,
+ pub(super) claimable_htlcs: HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
pub(super) struct MutChannelHolder<'a> {
pub(super) by_id: &'a mut HashMap<[u8; 32], Channel>,
pub(super) short_to_id: &'a mut HashMap<u64, [u8; 32]>,
- pub(super) next_forward: &'a mut Instant,
pub(super) forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
- pub(super) claimable_htlcs: &'a mut HashMap<PaymentHash, Vec<HTLCPreviousHopData>>,
+ pub(super) claimable_htlcs: &'a mut HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
pub(super) pending_msg_events: &'a mut Vec<events::MessageSendEvent>,
}
impl ChannelHolder {
MutChannelHolder {
by_id: &mut self.by_id,
short_to_id: &mut self.short_to_id,
- next_forward: &mut self.next_forward,
forward_htlcs: &mut self.forward_htlcs,
claimable_htlcs: &mut self.claimable_htlcs,
pending_msg_events: &mut self.pending_msg_events,
const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
-// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
-// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
-// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
-// on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY is at least LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER +
+// ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS, ie that if the next-hop peer fails the HTLC
+// within LATENCY_GRACE_PERIOD_BLOCKS we'll still have CLTV_CLAIM_BUFFER left to time it out
+// on-chain, then ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC before failing the
+// corresponding HTLC backwards, with a final LATENCY_GRACE_PERIOD_BLOCKS of margin left on the
+// inbound HTLC.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
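+// Both sanity constants above are effectively compile-time assertions: they are const u32
+// expressions, so if CLTV_EXPIRY_DELTA were ever set below the sum of the subtracted terms the
+// subtraction would underflow and the #[deny(const_err)] attributes turn that into a build error.
+// As a worked example (a sketch only, assuming CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS and
+// ANTI_REORG_DELAY are each 6 blocks, per their definitions in channelmonitor.rs):
+//   CHECK_CLTV_EXPIRY_SANITY   = 72 - 6 - 6 - 6 - 6 = 48
+//   CHECK_CLTV_EXPIRY_SANITY_2 = 72 - 6 - 2*6       = 54
+// both comfortably positive with CLTV_EXPIRY_DELTA = 6 * 12 = 72.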
macro_rules! secp_call {
( $res: expr, $err: expr ) => {
pub channel_value_satoshis: u64,
/// The user_id passed in to create_channel, or 0 if the channel was inbound.
pub user_id: u64,
+ /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
+ /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
+ /// available for inclusion in new outbound HTLCs). This further does not include any pending
+ /// outgoing HTLCs which are awaiting some other resolution to be sent.
+ pub outbound_capacity_msat: u64,
+ /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
+ /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
+ /// available for inclusion in new inbound HTLCs).
+ /// Note that there are some corner cases not fully handled here, so the actual available
+ /// inbound capacity may be slightly higher than this.
+ pub inbound_capacity_msat: u64,
+ /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
+ /// the peer is connected, and (c) no monitor update failure is pending resolution.
+ pub is_live: bool,
}
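+// Rough usage sketch for the new ChannelDetails fields (illustrative only; `channel_manager`
+// and `amt_msat` are hypothetical caller-side values):
+//
+//   let usable: Vec<ChannelDetails> = channel_manager.list_channels().into_iter()
+//       .filter(|c| c.is_live && c.outbound_capacity_msat >= amt_msat)
+//       .collect();
+//
+// list_usable_channels() already applies the is_live filter, so the explicit check is only
+// needed when starting from list_channels().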
macro_rules! handle_error {
}
}
-macro_rules! return_monitor_err {
+macro_rules! handle_monitor_err {
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
+ handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
};
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
match $err {
ChannelMonitorUpdateErr::PermanentFailure => {
+ log_error!($self, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
let (channel_id, mut chan) = $entry.remove_entry();
if let Some(short_id) = chan.get_short_channel_id() {
$channel_state.short_to_id.remove(&short_id);
// splitting hairs we'd prefer to claim payments that were to us, but we haven't
// given up the preimage yet, so might as well just wait until the payment is
// retried, avoiding the on-chain fees.
- return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+ let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+ res
},
ChannelMonitorUpdateErr::TemporaryFailure => {
- $entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
- return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
+ log_info!($self, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
+ log_bytes!($entry.key()[..]),
+ if $resend_commitment && $resend_raa {
+ match $action_type {
+ RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
+ RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
+ }
+ } else if $resend_commitment { "commitment" }
+ else if $resend_raa { "RAA" }
+ else { "nothing" },
+ (&$failed_forwards as &Vec<(PendingForwardHTLCInfo, u64)>).len(),
+ (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
+ if !$resend_commitment {
+ debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
+ }
+ if !$resend_raa {
+ debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
+ }
+ $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+ Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
},
}
}
}
+macro_rules! return_monitor_err {
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+ return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
+ };
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+ return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+ }
+}
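+// return_monitor_err! keeps the old early-return behaviour for handlers that can propagate a
+// MsgHandleErrInternal directly, while handle_monitor_err! merely evaluates to the Result so
+// call sites that must keep going (e.g. the HTLC-forwarding loop below, which collects into
+// handle_errors) can stash the error and deal with it after the channel_state lock is dropped.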
+
// Does not break in case of TemporaryFailure!
macro_rules! maybe_break_monitor_err {
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- match $err {
- ChannelMonitorUpdateErr::PermanentFailure => {
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
- }
- break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
- },
- ChannelMonitorUpdateErr::TemporaryFailure => {
- $entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new());
+ match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
+ (e, ChannelMonitorUpdateErr::PermanentFailure) => {
+ break e;
},
+ (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
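+ // TemporaryFailure: handle_monitor_err! has already marked the channel as awaiting a
+ // monitor-update restore, so the Ignore error it produced is intentionally dropped and
+ // the caller continues rather than breaking out.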
}
}
}
channel_state: Mutex::new(ChannelHolder{
by_id: HashMap::new(),
short_to_id: HashMap::new(),
- next_forward: Instant::now(),
forward_htlcs: HashMap::new(),
claimable_htlcs: HashMap::new(),
pending_msg_events: Vec::new(),
let channel_state = self.channel_state.lock().unwrap();
let mut res = Vec::with_capacity(channel_state.by_id.len());
for (channel_id, channel) in channel_state.by_id.iter() {
+ let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
res.push(ChannelDetails {
channel_id: (*channel_id).clone(),
short_channel_id: channel.get_short_channel_id(),
remote_network_id: channel.get_their_node_id(),
channel_value_satoshis: channel.get_value_satoshis(),
+ inbound_capacity_msat,
+ outbound_capacity_msat,
user_id: channel.get_user_id(),
+ is_live: channel.is_live(),
});
}
res
/// Gets the list of usable channels, in random order. Useful as an argument to
/// Router::get_route to ensure non-announced channels are used.
+ ///
+ /// These are guaranteed to have their is_live value set to true, see the documentation for
+ /// ChannelDetails::is_live for more info on exactly what the criteria are.
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
let channel_state = self.channel_state.lock().unwrap();
let mut res = Vec::with_capacity(channel_state.by_id.len());
// internal/external nomenclature, but that's ok cause that's probably what the user
// really wanted anyway.
if channel.is_live() {
+ let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
res.push(ChannelDetails {
channel_id: (*channel_id).clone(),
short_channel_id: channel.get_short_channel_id(),
remote_network_id: channel.get_their_node_id(),
channel_value_satoshis: channel.get_value_satoshis(),
+ inbound_capacity_msat,
+ outbound_capacity_msat,
user_id: channel.get_user_id(),
+ is_live: true,
});
}
}
let pending_forward_info = if next_hop_data.hmac == [0; 32] {
// OUR PAYMENT!
// final_expiry_too_soon
- if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+ if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
}
// final_incorrect_htlc_amount
break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
}
let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
- // We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
- if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+ // We want to have at least LATENCY_GRACE_PERIOD_BLOCKS to fail the HTLC backwards before we have to go on chain, which we do CLTV_CLAIM_BUFFER blocks before expiration
+ if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
}
if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
excess_data: Vec::new(),
};
- let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
+ let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
Ok(msgs::ChannelUpdate {
Ok(res) => res,
Err(_) => return None, // Only in case of state precondition violations eg channel is closing
};
- let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]);
+ let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
Some(msgs::AnnouncementSignatures {
let mut new_events = Vec::new();
let mut failed_forwards = Vec::new();
+ let mut handle_errors = Vec::new();
{
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
- if cfg!(not(feature = "fuzztarget")) && Instant::now() < *channel_state.next_forward {
- return;
- }
-
for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
if short_chan_id != 0 {
let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
continue;
}
};
- let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
-
- let mut add_htlc_msgs = Vec::new();
- let mut fail_htlc_msgs = Vec::new();
- for forward_info in pending_forwards.drain(..) {
- match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
- log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: forward_info.incoming_shared_secret,
- });
- match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
- Err(e) => {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
- } else {
- panic!("Stated return value requirements in send_htlc() were not met");
- }
- let chan_update = self.get_channel_update(forward_chan).unwrap();
- failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
- continue;
- },
- Ok(update_add) => {
- match update_add {
- Some(msg) => { add_htlc_msgs.push(msg); },
- None => {
- // Nothing to do here...we're waiting on a remote
- // revoke_and_ack before we can add anymore HTLCs. The Channel
- // will automatically handle building the update_add_htlc and
- // commitment_signed messages when we can.
- // TODO: Do some kind of timer to set the channel as !is_live()
- // as we don't really want others relying on us relaying through
- // this channel currently :/.
+ if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
+ let mut add_htlc_msgs = Vec::new();
+ let mut fail_htlc_msgs = Vec::new();
+ for forward_info in pending_forwards.drain(..) {
+ match forward_info {
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+ log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+ });
+ match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+ Err(e) => {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
+ }
+ let chan_update = self.get_channel_update(chan.get()).unwrap();
+ failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+ continue;
+ },
+ Ok(update_add) => {
+ match update_add {
+ Some(msg) => { add_htlc_msgs.push(msg); },
+ None => {
+ // Nothing to do here...we're waiting on a remote
+ // revoke_and_ack before we can add anymore HTLCs. The Channel
+ // will automatically handle building the update_add_htlc and
+ // commitment_signed messages when we can.
+ // TODO: Do some kind of timer to set the channel as !is_live()
+ // as we don't really want others relying on us relaying through
+ // this channel currently :/.
+ }
}
}
}
- }
- },
- HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
- log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
- match forward_chan.get_update_fail_htlc(htlc_id, err_packet) {
- Err(e) => {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+ },
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+ log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
+ match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+ Err(e) => {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+ }
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK, if not, the channel is on
+ // the chain and sending the HTLC-Timeout is their problem.
+ continue;
+ },
+ Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+ Ok(None) => {
+ // Nothing to do here...we're waiting on a remote
+ // revoke_and_ack before we can update the commitment
+ // transaction. The Channel will automatically handle
+ // building the update_fail_htlc and commitment_signed
+ // messages when we can.
+ // We don't need any kind of timer here as they should fail
+ // the channel onto the chain if they can't get our
+ // update_fail_htlc in time, it's not our problem.
}
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
- },
- Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
- Ok(None) => {
- // Nothing to do here...we're waiting on a remote
- // revoke_and_ack before we can update the commitment
- // transaction. The Channel will automatically handle
- // building the update_fail_htlc and commitment_signed
- // messages when we can.
- // We don't need any kind of timer here as they should fail
- // the channel onto the chain if they can't get our
- // update_fail_htlc in time, it's not our problem.
}
- }
- },
+ },
+ }
}
- }
- if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
- let (commitment_msg, monitor) = match forward_chan.send_commitment() {
- Ok(res) => res,
- Err(e) => {
- if let ChannelError::Ignore(_) = e {
- panic!("Stated return value requirements in send_commitment() were not met");
- }
- //TODO: Handle...this is bad!
+ if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
+ let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
+ Ok(res) => res,
+ Err(e) => {
+ if let ChannelError::Ignore(_) = e {
+ panic!("Stated return value requirements in send_commitment() were not met");
+ }
+ //TODO: Handle...this is bad!
+ continue;
+ },
+ };
+ if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+ handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
continue;
- },
- };
- if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
- unimplemented!();
+ }
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_their_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: add_htlc_msgs,
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: fail_htlc_msgs,
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed: commitment_msg,
+ },
+ });
}
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: forward_chan.get_their_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: add_htlc_msgs,
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: fail_htlc_msgs,
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed: commitment_msg,
- },
- });
+ } else {
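+ // forward_chan_id came from short_to_id under this same channel_state lock, so the
+ // channel must still be present in by_id here.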
+ unreachable!();
}
} else {
for forward_info in pending_forwards.drain(..) {
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
};
match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
- hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
- hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
+ hash_map::Entry::Occupied(mut entry) => entry.get_mut().push((forward_info.amt_to_forward, prev_hop_data)),
+ hash_map::Entry::Vacant(entry) => { entry.insert(vec![(forward_info.amt_to_forward, prev_hop_data)]); },
};
new_events.push(events::Event::PaymentReceived {
payment_hash: forward_info.payment_hash,
};
}
+ for (their_node_id, err) in handle_errors.drain(..) {
+ match handle_error!(self, err) {
+ Ok(_) => {},
+ Err(e) => {
+ if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+ } else {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: their_node_id,
+ action: e.action,
+ });
+ }
+ },
+ }
+ }
+
if new_events.is_empty() { return }
let mut events = self.pending_events.lock().unwrap();
events.append(&mut new_events);
}
/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
- /// after a PaymentReceived event.
- /// expected_value is the value you expected the payment to be for (not the amount it actually
- /// was for from the PaymentReceived event).
- pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, expected_value: u64) -> bool {
+ /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
+ /// along the path (including in our own channel on which we received it).
+ /// Returns false if no payment was found to fail backwards, true if the process of failing the
+ /// HTLC backwards has been started.
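+ ///
+ /// A minimal usage sketch (hypothetical caller-side names): on an Event::PaymentReceived whose
+ /// amount is not what the recipient expected, one would typically call
+ /// `channel_manager.fail_htlc_backwards(&payment_hash)` and treat a false return as "nothing
+ /// left to fail for this hash".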
+ pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
let _ = self.total_consistency_lock.read().unwrap();
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
if let Some(mut sources) = removed_source {
- for htlc_with_hash in sources.drain(..) {
+ for (recvd_value, htlc_with_hash) in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
HTLCSource::PreviousHopData(htlc_with_hash), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(expected_value).to_vec() });
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(recvd_value).to_vec() });
}
true
} else { false }
let mut forward_event = None;
if channel_state_lock.forward_htlcs.is_empty() {
- forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
- channel_state_lock.next_forward = forward_event.unwrap();
+ forward_event = Some(Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
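+ // i.e. a random holding delay of 1x to 5x MIN_HTLC_RELAY_HOLDING_CELL_MILLIS (assuming
+ // rng::rand_f32() returns a value in [0, 1)); the jitter batches forwards together rather
+ // than relaying each HTLC the instant it arrives.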
}
match channel_state_lock.forward_htlcs.entry(short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
if let Some(mut sources) = removed_source {
- for htlc_with_hash in sources.drain(..) {
+ // TODO: We should require the user specify the expected amount so that we can claim
+ // only payments for the correct amount, and reject payments for incorrect amounts
+ // (which are probably middle nodes probing to break our privacy).
+ for (_, htlc_with_hash) in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
}
} else { false }
}
fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: PaymentPreimage) {
- match source {
- HTLCSource::OutboundRoute { .. } => {
- mem::drop(channel_state_lock);
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PaymentSent {
- payment_preimage
- });
- },
- HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
- //TODO: Delay the claimed_funds relaying just like we do outbound relay!
- let channel_state = channel_state_lock.borrow_parts();
-
- let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
- Some(chan_id) => chan_id.clone(),
- None => {
- // TODO: There is probably a channel manager somewhere that needs to
- // learn the preimage as the channel already hit the chain and that's
- // why it's missing.
- return
- }
- };
+ let (their_node_id, err) = loop {
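+ // Note: this loop never iterates; it exists so the error path deep inside the match can
+ // `break (node_id, err)` out to the shared handle_error! logic below, while the success
+ // paths simply `return`.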
+ match source {
+ HTLCSource::OutboundRoute { .. } => {
+ mem::drop(channel_state_lock);
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PaymentSent {
+ payment_preimage
+ });
+ },
+ HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
+ //TODO: Delay the claimed_funds relaying just like we do outbound relay!
+ let channel_state = channel_state_lock.borrow_parts();
- let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
- match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
- Ok((msgs, monitor_option)) => {
- if let Some(chan_monitor) = monitor_option {
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();// but def don't push the event...
- }
+ let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
+ Some(chan_id) => chan_id.clone(),
+ None => {
+ // TODO: There is probably a channel manager somewhere that needs to
+ // learn the preimage as the channel already hit the chain and that's
+ // why it's missing.
+ return
}
- if let Some((msg, commitment_signed)) = msgs {
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get_their_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: vec![msg],
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
+ };
+
+ if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+ let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+ match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
+ Ok((msgs, monitor_option)) => {
+ if let Some(chan_monitor) = monitor_option {
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ if was_frozen_for_monitor {
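+ // The channel was already blocked waiting on a previous monitor update, so
+ // (presumably) the fulfill went into the holding cell and produced no messages;
+ // there is nothing further to freeze here.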
+ assert!(msgs.is_none());
+ } else {
+ break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()));
+ }
+ }
}
- });
+ if let Some((msg, commitment_signed)) = msgs {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_their_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: vec![msg],
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ }
+ });
+ }
+ },
+ Err(_e) => {
+ // TODO: There is probably a channel manager somewhere that needs to
+ // learn the preimage as the channel may be about to hit the chain.
+ //TODO: Do something with e?
+ return
+ },
}
- },
- Err(_e) => {
- // TODO: There is probably a channel manager somewhere that needs to
- // learn the preimage as the channel may be about to hit the chain.
- //TODO: Do something with e?
- return
- },
+ } else { unreachable!(); }
+ },
+ }
+ return;
+ };
+
+ match handle_error!(self, err) {
+ Ok(_) => {},
+ Err(e) => {
+ if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+ } else {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: their_node_id,
+ action: e.action,
+ });
}
},
}
}
}
- fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+ fn internal_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
if msg.chain_hash != self.genesis_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
}
- let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, Arc::clone(&self.logger), &self.default_configuration)
+ let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_local_features, msg, 0, Arc::clone(&self.logger), &self.default_configuration)
.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
Ok(())
}
- fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
+ fn internal_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
let (value, output_script, user_id) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = channel_lock.borrow_parts();
//TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
}
- try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_local_features), channel_state, chan);
(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
},
//TODO: same as above
if !pending_forwards.is_empty() {
let mut channel_state = self.channel_state.lock().unwrap();
if channel_state.forward_htlcs.is_empty() {
- forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
- channel_state.next_forward = forward_event.unwrap();
+ forward_event = Some(Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
}
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
let were_node_one = announcement.node_id_1 == our_node_id;
- let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]);
+ let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan);
}
/// We force-close the channel without letting our counterparty participate in the shutdown
- fn block_disconnected(&self, header: &BlockHeader) {
+ fn block_disconnected(&self, header: &BlockHeader, _: u32) {
let _ = self.total_consistency_lock.read().unwrap();
let mut failed_channels = Vec::new();
{
impl ChannelMessageHandler for ChannelManager {
//TODO: Handle errors and close channel (or so)
- fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
+ fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
let _ = self.total_consistency_lock.read().unwrap();
- handle_error!(self, self.internal_open_channel(their_node_id, msg))
+ handle_error!(self, self.internal_open_channel(their_node_id, their_local_features, msg))
}
- fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
+ fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
let _ = self.total_consistency_lock.read().unwrap();
- handle_error!(self, self.internal_accept_channel(their_node_id, msg))
+ handle_error!(self, self.internal_accept_channel(their_node_id, their_local_features, msg))
}
fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
true
})
}
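+ // Drop any queued per-peer messages addressed to the disconnecting peer; broadcast
+ // events (and network-wide payment-failure updates) are kept since they are not tied
+ // to this peer's connection.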
+ pending_msg_events.retain(|msg| {
+ match msg {
+ &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+ &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+ &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
+ }
+ });
}
for failure in failed_channels.drain(..) {
self.finish_force_close_channel(failure);
for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
payment_hash.write(writer)?;
(previous_hops.len() as u64).write(writer)?;
- for previous_hop in previous_hops {
+ for &(recvd_amt, ref previous_hop) in previous_hops.iter() {
+ recvd_amt.write(writer)?;
previous_hop.write(writer)?;
}
}
let previous_hops_len: u64 = Readable::read(reader)?;
let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
for _ in 0..previous_hops_len {
- previous_hops.push(Readable::read(reader)?);
+ previous_hops.push((Readable::read(reader)?, Readable::read(reader)?));
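+ // Tuple order here must match the write side: recvd_amt first, then the previous hop.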
}
claimable_htlcs.insert(payment_hash, previous_hops);
}
channel_state: Mutex::new(ChannelHolder {
by_id,
short_to_id,
- next_forward: Instant::now(),
forward_htlcs,
claimable_htlcs,
pending_msg_events: Vec::new(),