use ln::msgs;
use ln::msgs::{HandleError, MsgEncodable};
use ln::channelmonitor::ChannelMonitor;
-use ln::channelmanager::PendingForwardHTLCInfo;
+use ln::channelmanager::{PendingForwardHTLCInfo, HTLCFailReason};
use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment};
use ln::chan_utils;
use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
use util::sha2::Sha256;
use std::default::Default;
-use std::cmp;
+use std::{cmp,mem};
use std::time::Instant;
pub struct ChannelKeys {
pub fn new_from_seed(seed: &[u8; 32]) -> Result<ChannelKeys, secp256k1::Error> {
let mut prk = [0; 32];
hkdf_extract(Sha256::new(), b"rust-lightning key gen salt", seed, &mut prk);
- let secp_ctx = Secp256k1::new();
+ let secp_ctx = Secp256k1::without_caps();
let mut okm = [0; 32];
hkdf_expand(Sha256::new(), &prk, b"rust-lightning funding key info", &mut okm);
#[derive(PartialEq)]
enum HTLCState {
+ /// Added by remote, to be included in next local commitment tx.
RemoteAnnounced,
+ /// Included in a received commitment_signed message (implying we've revoke_and_ack'ed it), but
+ /// the remote side hasn't yet revoked their previous state, which we need them to do before we
+ /// accept this HTLC. Implies AwaitingRemoteRevoke.
+ /// We also have not yet included this HTLC in a commitment_signed message, and are waiting on
+ /// a remote revoke_and_ack on a previous state before we can do so.
+ AwaitingRemoteRevokeToAnnounce,
+ /// Included in a received commitment_signed message (implying we've revoke_and_ack'ed it), but
+ /// the remote side hasn't yet revoked their previous state, which we need them to do before we
+ /// accept this HTLC. Implies AwaitingRemoteRevoke.
+ /// We have included this HTLC in our latest commitment_signed and are now just waiting on a
+ /// revoke_and_ack.
+ AwaitingAnnouncedRemoteRevoke,
+ /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
+ /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
+ /// we will promote to Committed (note that they may not accept it until the next time we
+	/// revoke, but we don't really care about that:
+ /// * they've revoked, so worst case we can announce an old state and get our (option on)
+	///    money back (though we won't), and,
+ /// * we'll send them a revoke when they send a commitment_signed, and since only they're
+ /// allowed to remove it, the "can only be removed once committed on both sides" requirement
+	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
+ /// we'll never get out of sync).
LocalAnnounced,
Committed,
+ /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
+ /// the change (though they'll need to revoke before we fail the payment).
+ RemoteRemoved,
+ /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
+ /// the remote side hasn't yet revoked their previous state, which we need them to do before we
+ /// can do any backwards failing. Implies AwaitingRemoteRevoke.
+ /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
+ /// remote revoke_and_ack on a previous state before we can do so.
+ AwaitingRemoteRevokeToRemove,
+ /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
+ /// the remote side hasn't yet revoked their previous state, which we need them to do before we
+ /// can do any backwards failing. Implies AwaitingRemoteRevoke.
+ /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
+ /// revoke_and_ack to drop completely.
+ AwaitingRemovedRemoteRevoke,
+ /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
+ /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
+ /// we'll promote to LocalRemovedAwaitingCommitment if we fulfilled, otherwise we'll drop at
+ /// that point.
+ /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
+ /// commitment transaction without it as otherwise we'll have to force-close the channel to
+ /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
+ /// anyway).
+ LocalRemoved,
+ /// Removed by us, sent a new commitment_signed and got a revoke_and_ack. Just waiting on an
+ /// updated local commitment transaction.
+ LocalRemovedAwaitingCommitment,
}
-struct HTLCOutput {
+struct HTLCOutput { //TODO: Refactor into Outbound/InboundHTLCOutput (will save memory and fewer panics)
outbound: bool, // ie to an HTLC-Timeout transaction
htlc_id: u64,
amount_msat: u64,
cltv_expiry: u32,
payment_hash: [u8; 32],
state: HTLCState,
- // state == RemoteAnnounced implies pending_forward_state, otherwise it must be None
+ /// If we're in a Remote* removed state, set if they failed, otherwise None
+ fail_reason: Option<HTLCFailReason>,
+ /// If we're in LocalRemoved*, set to true if we fulfilled the HTLC, and can claim money
+ local_removed_fulfilled: bool,
+ /// state pre-committed Remote* implies pending_forward_state, otherwise it must be None
pending_forward_state: Option<PendingForwardHTLCInfo>,
}
}
/// See AwaitingRemoteRevoke ChannelState for more info
-struct HTLCOutputAwaitingACK {
- // always outbound
- amount_msat: u64,
- cltv_expiry: u32,
- payment_hash: [u8; 32],
- onion_routing_packet: msgs::OnionPacket,
- time_created: Instant, //TODO: Some kind of timeout thing-a-majig
+enum HTLCUpdateAwaitingACK {
+ AddHTLC {
+ // always outbound
+ amount_msat: u64,
+ cltv_expiry: u32,
+ payment_hash: [u8; 32],
+ onion_routing_packet: msgs::OnionPacket,
+ time_created: Instant, //TODO: Some kind of timeout thing-a-majig
+ },
+ ClaimHTLC {
+ payment_preimage: [u8; 32],
+	payment_hash: [u8; 32], // Only here for efficient duplicate detection
+ },
+ FailHTLC {
+ payment_hash: [u8; 32],
+ err_packet: msgs::OnionErrorPacket,
+ },
}
enum ChannelState {
cur_remote_commitment_transaction_number: u64,
value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
pending_htlcs: Vec<HTLCOutput>,
- holding_cell_htlcs: Vec<HTLCOutputAwaitingACK>,
+ holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
next_local_htlc_id: u64,
next_remote_htlc_id: u64,
channel_update_count: u32,
their_delayed_payment_basepoint: PublicKey,
their_htlc_basepoint: PublicKey,
their_cur_commitment_point: PublicKey,
+
+ their_prev_commitment_point: Option<PublicKey>,
their_node_id: PublicKey,
their_shutdown_scriptpubkey: Option<Script>,
cur_remote_commitment_transaction_number: (1 << 48) - 1,
value_to_self_msat: channel_value_satoshis * 1000, //TODO: give them something on open? Parameterize it?
pending_htlcs: Vec::new(),
- holding_cell_htlcs: Vec::new(),
+ holding_cell_htlc_updates: Vec::new(),
next_local_htlc_id: 0,
next_remote_htlc_id: 0,
channel_update_count: 0,
their_delayed_payment_basepoint: PublicKey::new(),
their_htlc_basepoint: PublicKey::new(),
their_cur_commitment_point: PublicKey::new(),
+
+ their_prev_commitment_point: None,
their_node_id: their_node_id,
their_shutdown_scriptpubkey: None,
cur_remote_commitment_transaction_number: (1 << 48) - 1,
value_to_self_msat: msg.push_msat,
pending_htlcs: Vec::new(),
- holding_cell_htlcs: Vec::new(),
+ holding_cell_htlc_updates: Vec::new(),
next_local_htlc_id: 0,
next_remote_htlc_id: 0,
channel_update_count: 0,
their_delayed_payment_basepoint: msg.delayed_payment_basepoint,
their_htlc_basepoint: msg.htlc_basepoint,
their_cur_commitment_point: msg.first_per_commitment_point,
+
+ their_prev_commitment_point: None,
their_node_id: their_node_id,
their_shutdown_scriptpubkey: None,
let dust_limit_satoshis = if local { self.our_dust_limit_satoshis } else { self.their_dust_limit_satoshis };
let mut remote_htlc_total_msat = 0;
let mut local_htlc_total_msat = 0;
+ let mut value_to_self_msat_offset = 0;
for ref htlc in self.pending_htlcs.iter() {
- if htlc.state == HTLCState::Committed || htlc.state == (if generated_by_local { HTLCState::LocalAnnounced } else { HTLCState::RemoteAnnounced }) {
+ let include = match htlc.state {
+ HTLCState::RemoteAnnounced => !generated_by_local,
+ HTLCState::AwaitingRemoteRevokeToAnnounce => !generated_by_local,
+ HTLCState::AwaitingAnnouncedRemoteRevoke => true,
+ HTLCState::LocalAnnounced => generated_by_local,
+ HTLCState::Committed => true,
+ HTLCState::RemoteRemoved => generated_by_local,
+ HTLCState::AwaitingRemoteRevokeToRemove => generated_by_local,
+ HTLCState::AwaitingRemovedRemoteRevoke => false,
+ HTLCState::LocalRemoved => !generated_by_local,
+ HTLCState::LocalRemovedAwaitingCommitment => false,
+ };
+
+ if include {
if htlc.outbound == local { // "offered HTLC output"
if htlc.amount_msat / 1000 >= dust_limit_satoshis + (self.feerate_per_kw * HTLC_TIMEOUT_TX_WEIGHT / 1000) {
let htlc_in_tx = htlc.get_in_commitment(true);
} else {
remote_htlc_total_msat += htlc.amount_msat;
}
+ } else {
+ match htlc.state {
+ HTLCState::AwaitingRemoteRevokeToRemove|HTLCState::AwaitingRemovedRemoteRevoke => {
+ if generated_by_local && htlc.fail_reason.is_none() {
+ value_to_self_msat_offset -= htlc.amount_msat as i64;
+ }
+ },
+ HTLCState::LocalRemoved => {
+ if !generated_by_local && htlc.local_removed_fulfilled {
+ value_to_self_msat_offset += htlc.amount_msat as i64;
+ }
+ },
+ HTLCState::LocalRemovedAwaitingCommitment => {
+ value_to_self_msat_offset += htlc.amount_msat as i64;
+ },
+ _ => {},
+ }
}
}
let total_fee: u64 = self.feerate_per_kw * (COMMITMENT_TX_BASE_WEIGHT + (txouts.len() as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
- let value_to_self: i64 = ((self.value_to_self_msat - local_htlc_total_msat) as i64) / 1000 - if self.channel_outbound { total_fee as i64 } else { 0 };
- let value_to_remote: i64 = (((self.channel_value_satoshis * 1000 - self.value_to_self_msat - remote_htlc_total_msat) / 1000) as i64) - if self.channel_outbound { 0 } else { total_fee as i64 };
+ let value_to_self: i64 = ((self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset) / 1000 - if self.channel_outbound { total_fee as i64 } else { 0 };
+ let value_to_remote: i64 = (((self.channel_value_satoshis * 1000 - self.value_to_self_msat - remote_htlc_total_msat) as i64 - value_to_self_msat_offset) / 1000) - if self.channel_outbound { 0 } else { total_fee as i64 };
let value_to_a = if local { value_to_self } else { value_to_remote };
let value_to_b = if local { value_to_remote } else { value_to_self };
let mut htlcs_used: Vec<HTLCOutputInCommitment> = Vec::new();
for (idx, out) in txouts.drain(..).enumerate() {
outputs.push(out.0);
- match out.1 {
- Some(out_htlc) => {
- htlcs_used.push(out_htlc);
- htlcs_used.last_mut().unwrap().transaction_output_index = idx as u32;
- },
- None => {}
+ if let Some(out_htlc) = out.1 {
+ htlcs_used.push(out_htlc);
+ htlcs_used.last_mut().unwrap().transaction_output_index = idx as u32;
}
}
/// Builds the htlc-success or htlc-timeout transaction which spends a given HTLC output
/// @local is used only to convert relevant internal structures which refer to remote vs local
/// to decide value of outputs and direction of HTLCs.
- fn build_htlc_transaction(&self, prev_hash: &Sha256dHash, htlc: &HTLCOutputInCommitment, local: bool, keys: &TxCreationKeys) -> Result<Transaction, HandleError> {
+ fn build_htlc_transaction(&self, prev_hash: &Sha256dHash, htlc: &HTLCOutputInCommitment, local: bool, keys: &TxCreationKeys) -> Transaction {
let mut txins: Vec<TxIn> = Vec::new();
txins.push(TxIn {
prev_hash: prev_hash.clone(),
value: htlc.amount_msat / 1000 - total_fee //TODO: BOLT 3 does not specify if we should add amount_msat before dividing or if we should divide by 1000 before subtracting (as we do here)
});
- Ok(Transaction {
+ Transaction {
version: 2,
lock_time: if htlc.offered { htlc.cltv_expiry } else { 0 },
input: txins,
output: txouts,
- })
+ }
}
/// Signs a transaction created by build_htlc_transaction. If the transaction is an
Ok(())
}
- pub fn get_update_fulfill_htlc(&mut self, payment_preimage: [u8; 32]) -> Result<msgs::UpdateFulfillHTLC, HandleError> {
+ pub fn get_update_fulfill_htlc(&mut self, payment_preimage_arg: [u8; 32]) -> Result<Option<msgs::UpdateFulfillHTLC>, HandleError> {
// Either ChannelFunded got set (which means it wont bet unset) or there is no way any
// caller thought we could have something claimed (cause we wouldn't have accepted in an
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
let mut sha = Sha256::new();
- sha.input(&payment_preimage);
- let mut payment_hash = [0; 32];
- sha.result(&mut payment_hash);
+ sha.input(&payment_preimage_arg);
+ let mut payment_hash_calc = [0; 32];
+ sha.result(&mut payment_hash_calc);
+
+ // Now update local state:
+ if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
+ for pending_update in self.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, .. } => {
+ if payment_preimage_arg == *payment_preimage {
+ return Ok(None);
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { ref payment_hash, .. } => {
+ if payment_hash_calc == *payment_hash {
+ return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
+ }
+ },
+ _ => {}
+ }
+ }
+ self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
+ payment_preimage: payment_preimage_arg, payment_hash: payment_hash_calc,
+ });
+ return Ok(None);
+ }
let mut htlc_id = 0;
let mut htlc_amount_msat = 0;
- //TODO: swap_remove since we dont need to maintain ordering here
- self.pending_htlcs.retain(|ref htlc| {
- if !htlc.outbound && htlc.payment_hash == payment_hash {
+ for htlc in self.pending_htlcs.iter_mut() {
+ if !htlc.outbound && htlc.payment_hash == payment_hash_calc {
if htlc_id != 0 {
panic!("Duplicate HTLC payment_hash, you probably re-used payment preimages, NEVER DO THIS!");
}
htlc_id = htlc.htlc_id;
htlc_amount_msat += htlc.amount_msat;
- false
- } else { true }
- });
+ if htlc.state == HTLCState::Committed {
+ htlc.state = HTLCState::LocalRemoved;
+ htlc.local_removed_fulfilled = true;
+ } else if htlc.state == HTLCState::RemoteAnnounced {
+ panic!("Somehow forwarded HTLC prior to remote revocation!");
+ } else if htlc.state == HTLCState::LocalRemoved || htlc.state == HTLCState::LocalRemovedAwaitingCommitment {
+ return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
+ } else {
+ panic!("Have an inbound HTLC when not awaiting remote revoke that had a garbage state");
+ }
+ }
+ }
if htlc_amount_msat == 0 {
return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
}
+ self.channel_monitor.provide_payment_preimage(&payment_preimage_arg);
- //TODO: This is racy af, they may have pending messages in flight to us that will not have
- //received this yet!
- self.value_to_self_msat += htlc_amount_msat;
- Ok(msgs::UpdateFulfillHTLC {
+ Ok(Some(msgs::UpdateFulfillHTLC {
channel_id: self.channel_id(),
htlc_id: htlc_id,
- payment_preimage: payment_preimage,
- })
+ payment_preimage: payment_preimage_arg,
+ }))
+ }
+
+ pub fn get_update_fulfill_htlc_and_commit(&mut self, payment_preimage: [u8; 32]) -> Result<Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, HandleError> {
+ match self.get_update_fulfill_htlc(payment_preimage)? {
+ Some(update_fulfill_htlc) =>
+ Ok(Some((update_fulfill_htlc, self.send_commitment_no_status_check()?))),
+ None => Ok(None)
+ }
}
- pub fn get_update_fail_htlc(&mut self, payment_hash: &[u8; 32], err_packet: msgs::OnionErrorPacket) -> Result<msgs::UpdateFailHTLC, HandleError> {
+ pub fn get_update_fail_htlc(&mut self, payment_hash_arg: &[u8; 32], err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, HandleError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(HandleError{err: "Was asked to fail an HTLC when channel was not in an operational state", msg: None});
}
assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ // Now update local state:
+ if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
+ for pending_update in self.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_hash, .. } => {
+ if *payment_hash_arg == *payment_hash {
+ return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { ref payment_hash, .. } => {
+ if *payment_hash_arg == *payment_hash {
+ return Ok(None);
+ }
+ },
+ _ => {}
+ }
+ }
+ self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
+ payment_hash: payment_hash_arg.clone(),
+ err_packet,
+ });
+ return Ok(None);
+ }
+
let mut htlc_id = 0;
let mut htlc_amount_msat = 0;
- //TODO: swap_remove since we dont need to maintain ordering here
- self.pending_htlcs.retain(|ref htlc| {
- if !htlc.outbound && htlc.payment_hash == *payment_hash {
+ for htlc in self.pending_htlcs.iter_mut() {
+ if !htlc.outbound && htlc.payment_hash == *payment_hash_arg {
if htlc_id != 0 {
panic!("Duplicate HTLC payment_hash, you probably re-used payment preimages, NEVER DO THIS!");
}
htlc_id = htlc.htlc_id;
htlc_amount_msat += htlc.amount_msat;
- false
- } else { true }
- });
+ if htlc.state == HTLCState::Committed {
+ htlc.state = HTLCState::LocalRemoved;
+ } else if htlc.state == HTLCState::RemoteAnnounced {
+ panic!("Somehow forwarded HTLC prior to remote revocation!");
+ } else if htlc.state == HTLCState::LocalRemoved || htlc.state == HTLCState::LocalRemovedAwaitingCommitment {
+ return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
+ } else {
+ panic!("Have an inbound HTLC when not awaiting remote revoke that had a garbage state");
+ }
+ }
+ }
if htlc_amount_msat == 0 {
return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
}
- //TODO: This is racy af, they may have pending messages in flight to us that will not have
- //received this yet!
-
- Ok(msgs::UpdateFailHTLC {
+ Ok(Some(msgs::UpdateFailHTLC {
channel_id: self.channel_id(),
htlc_id,
reason: err_packet
- })
+ }))
+ }
+
+ pub fn get_update_fail_htlc_and_commit(&mut self, payment_hash: &[u8; 32], err_packet: msgs::OnionErrorPacket) -> Result<Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)>, HandleError> {
+ match self.get_update_fail_htlc(payment_hash, err_packet)? {
+ Some(update_fail_htlc) =>
+ Ok(Some((update_fail_htlc, self.send_commitment_no_status_check()?))),
+ None => Ok(None)
+ }
}
// Message handlers:
self.channel_state = ChannelState::FundingSent as u32;
let funding_txo = self.channel_monitor.get_funding_txo().unwrap();
self.channel_id = funding_txo.0.into_be() ^ Uint256::from_u64(funding_txo.1 as u64).unwrap(); //TODO: or le?
+ self.cur_remote_commitment_transaction_number -= 1;
+ self.cur_local_commitment_transaction_number -= 1;
Ok(msgs::FundingSigned {
channel_id: self.channel_id,
if self.channel_state != ChannelState::FundingCreated as u32 {
return Err(HandleError{err: "Received funding_signed in strange state!", msg: None});
}
- if self.channel_monitor.get_min_seen_secret() != (1 << 48) || self.cur_remote_commitment_transaction_number != (1 << 48) - 1 || self.cur_local_commitment_transaction_number != (1 << 48) - 1 {
+ if self.channel_monitor.get_min_seen_secret() != (1 << 48) || self.cur_remote_commitment_transaction_number != (1 << 48) - 2 || self.cur_local_commitment_transaction_number != (1 << 48) - 1 {
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
secp_call!(self.secp_ctx.verify(&local_sighash, &msg.signature, &self.their_funding_pubkey), "Invalid funding_signed signature from peer");
self.channel_state = ChannelState::FundingSent as u32;
+ self.cur_local_commitment_transaction_number -= 1;
Ok(())
}
return Err(HandleError{err: "Peer sent a funding_locked at a strange time", msg: None});
}
- //TODO: Note that this must be a duplicate of the previous commitment point they sent us,
- //as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
- //they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
- //a protocol oversight, but I assume I'm just missing something.
- if self.their_cur_commitment_point != msg.next_per_commitment_point {
- return Err(HandleError{err: "Non-duplicate next_per_commitment_point in funding_locked", msg: None});
- }
+ self.their_prev_commitment_point = Some(self.their_cur_commitment_point);
self.their_cur_commitment_point = msg.next_per_commitment_point;
Ok(())
}
/// Returns (inbound_htlc_count, outbound_htlc_count, htlc_outbound_value_msat, htlc_inbound_value_msat)
- fn get_pending_htlc_stats(&self) -> (u32, u32, u64, u64) {
+ /// If its for a remote update check, we need to be more lax about checking against messages we
+ /// sent but they may not have received/processed before they sent this message. Further, for
+ /// our own sends, we're more conservative and even consider things they've removed against
+ /// totals, though there is little reason to outside of further avoiding any race condition
+ /// issues.
+ fn get_pending_htlc_stats(&self, for_remote_update_check: bool) -> (u32, u32, u64, u64) {
let mut inbound_htlc_count: u32 = 0;
let mut outbound_htlc_count: u32 = 0;
let mut htlc_outbound_value_msat = 0;
let mut htlc_inbound_value_msat = 0;
for ref htlc in self.pending_htlcs.iter() {
+ match htlc.state {
+ HTLCState::RemoteAnnounced => {},
+ HTLCState::AwaitingRemoteRevokeToAnnounce => {},
+ HTLCState::AwaitingAnnouncedRemoteRevoke => {},
+ HTLCState::LocalAnnounced => { if for_remote_update_check { continue; } },
+ HTLCState::Committed => {},
+ HTLCState::RemoteRemoved => { if for_remote_update_check { continue; } },
+ HTLCState::AwaitingRemoteRevokeToRemove => { if for_remote_update_check { continue; } },
+ HTLCState::AwaitingRemovedRemoteRevoke => { if for_remote_update_check { continue; } },
+ HTLCState::LocalRemoved => {},
+ HTLCState::LocalRemovedAwaitingCommitment => { if for_remote_update_check { continue; } },
+ }
if !htlc.outbound {
inbound_htlc_count += 1;
htlc_inbound_value_msat += htlc.amount_msat;
return Err(HandleError{err: "Remote side tried to send less than our minimum HTLC value", msg: None});
}
- let (inbound_htlc_count, _, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats();
+ let (inbound_htlc_count, _, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats(true);
if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 {
return Err(HandleError{err: "Remote tried to push more than our max accepted HTLCs", msg: None});
}
payment_hash: msg.payment_hash,
cltv_expiry: msg.cltv_expiry,
state: HTLCState::RemoteAnnounced,
+ fail_reason: None,
+ local_removed_fulfilled: false,
pending_forward_state: Some(pending_forward_state),
});
}
/// Removes an outbound HTLC which has been commitment_signed by the remote end
- fn remove_outbound_htlc(&mut self, htlc_id: u64, check_preimage: Option<[u8; 32]>) -> Result<HTLCOutput, HandleError> {
- let mut found_idx = None;
- for (idx, ref htlc) in self.pending_htlcs.iter().enumerate() {
+ fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<[u8; 32]>, fail_reason: Option<HTLCFailReason>) -> Result<(), HandleError> {
+ for mut htlc in self.pending_htlcs.iter_mut() {
if htlc.outbound && htlc.htlc_id == htlc_id {
match check_preimage {
None => {},
return Err(HandleError{err: "Remote tried to fulfill HTLC with an incorrect preimage", msg: None});
}
};
- found_idx = Some(idx);
- break;
- }
- }
- match found_idx {
- None => Err(HandleError{err: "Remote tried to fulfill/fail an HTLC we couldn't find", msg: None}),
- Some(idx) => {
- Ok(self.pending_htlcs.swap_remove(idx))
- }
- }
- }
-
- /// Used to fulfill holding_cell_htlcs when we get a remote ack (or implicitly get it by them
- /// fulfilling or failing the last pending HTLC)
- fn free_holding_cell_htlcs(&mut self) -> Result<Option<(Vec<msgs::UpdateAddHTLC>, msgs::CommitmentSigned)>, HandleError> {
- if self.holding_cell_htlcs.len() != 0 {
- let mut new_htlcs = self.holding_cell_htlcs.split_off(0);
- let mut update_add_msgs = Vec::with_capacity(new_htlcs.len());
- let mut err = None;
- for new_htlc in new_htlcs.drain(..) {
- // Note that this *can* fail, though it should be due to rather-rare conditions on
- // fee races with adding too many outputs which push our total payments just over
- // the limit. In case its less rare than I anticipate, we may want to revisit
- // handling this case better and maybe fufilling some of the HTLCs while attempting
- // to rebalance channels.
- if self.holding_cell_htlcs.len() != 0 {
- self.holding_cell_htlcs.push(new_htlc);
+ if htlc.state == HTLCState::LocalAnnounced {
+ return Err(HandleError{err: "Remote tried to fulfill HTLC before it had been committed", msg: None});
+ } else if htlc.state == HTLCState::Committed {
+ htlc.state = HTLCState::RemoteRemoved;
+ htlc.fail_reason = fail_reason;
+ } else if htlc.state == HTLCState::AwaitingRemoteRevokeToRemove || htlc.state == HTLCState::AwaitingRemovedRemoteRevoke || htlc.state == HTLCState::RemoteRemoved {
+ return Err(HandleError{err: "Remote tried to fulfill HTLC that they'd already fulfilled", msg: None});
} else {
- match self.send_htlc(new_htlc.amount_msat, new_htlc.payment_hash, new_htlc.cltv_expiry, new_htlc.onion_routing_packet.clone()) {
- Ok(update_add_msg_option) => update_add_msgs.push(update_add_msg_option.unwrap()),
- Err(e) => {
- self.holding_cell_htlcs.push(new_htlc);
- err = Some(e);
- }
- }
+ panic!("Got a non-outbound state on an outbound HTLC");
}
+ return Ok(());
}
- //TODO: Need to examine the type of err - if its a fee issue or similar we may want to
- //fail it back the route, if its a temporary issue we can ignore it...
- if update_add_msgs.len() > 0 {
- Ok(Some((update_add_msgs, self.send_commitment()?)))
- } else {
- Err(err.unwrap())
- }
- } else {
- Ok(None)
}
+ Err(HandleError{err: "Remote tried to fulfill/fail an HTLC we couldn't find", msg: None})
}
pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
let mut payment_hash = [0; 32];
sha.result(&mut payment_hash);
- match self.remove_outbound_htlc(msg.htlc_id, Some(payment_hash)) {
- Err(e) => return Err(e),
- Ok(htlc) => {
- //TODO: Double-check that we didn't exceed some limits (or value_to_self went
- //negative here?)
- self.value_to_self_msat -= htlc.amount_msat;
- }
- }
- Ok(())
+ self.channel_monitor.provide_payment_preimage(&msg.payment_preimage);
+ self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None)
}
- pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC) -> Result<[u8; 32], HandleError> {
+ pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), HandleError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(HandleError{err: "Got add HTLC message when channel was not in an operational state", msg: None});
}
- let payment_hash = match self.remove_outbound_htlc(msg.htlc_id, None) {
- Err(e) => return Err(e),
- Ok(htlc) => {
- //TODO: Double-check that we didn't exceed some limits (or value_to_self went
- //negative here?)
- htlc.payment_hash
- }
- };
- Ok(payment_hash)
+ self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))
}
- pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC) -> Result<[u8; 32], HandleError> {
+ pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), HandleError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(HandleError{err: "Got add HTLC message when channel was not in an operational state", msg: None});
}
- let payment_hash = match self.remove_outbound_htlc(msg.htlc_id, None) {
- Err(e) => return Err(e),
- Ok(htlc) => {
- //TODO: Double-check that we didn't exceed some limits (or value_to_self went
- //negative here?)
- htlc.payment_hash
- }
- };
- Ok(payment_hash)
+ self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))
}
- pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Vec<PendingForwardHTLCInfo>), HandleError> {
+ pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), HandleError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(HandleError{err: "Got commitment signed message when channel was not in an operational state", msg: None});
}
}
for (idx, ref htlc) in local_commitment_tx.1.iter().enumerate() {
- let htlc_tx = self.build_htlc_transaction(&local_commitment_txid, htlc, true, &local_keys)?;
+ let htlc_tx = self.build_htlc_transaction(&local_commitment_txid, htlc, true, &local_keys);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &local_keys);
let htlc_sighash = Message::from_slice(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]).unwrap();
secp_call!(self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &local_keys.b_htlc_key), "Invalid HTLC tx siganture from peer");
}
let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number - 1)).unwrap();
- let per_commitment_secret = chan_utils::build_commitment_secret(self.local_keys.commitment_seed, self.cur_local_commitment_transaction_number);
-
- //TODO: Store htlc keys in our channel_watcher
+ let per_commitment_secret = chan_utils::build_commitment_secret(self.local_keys.commitment_seed, self.cur_local_commitment_transaction_number + 1);
// Update state now that we've passed all the can-fail calls...
- let mut to_forward_infos = Vec::new();
- for ref mut htlc in self.pending_htlcs.iter_mut() {
+ let mut need_our_commitment = false;
+ for htlc in self.pending_htlcs.iter_mut() {
if htlc.state == HTLCState::RemoteAnnounced {
- htlc.state = HTLCState::Committed;
- to_forward_infos.push(htlc.pending_forward_state.take().unwrap());
+ htlc.state = HTLCState::AwaitingRemoteRevokeToAnnounce;
+ need_our_commitment = true;
+ } else if htlc.state == HTLCState::RemoteRemoved {
+ htlc.state = HTLCState::AwaitingRemoteRevokeToRemove;
+ need_our_commitment = true;
}
}
+ // Finally delete all the LocalRemovedAwaitingCommitment HTLCs
+ // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
+ let mut claimed_value_msat = 0;
+ self.pending_htlcs.retain(|htlc| {
+ if htlc.state == HTLCState::LocalRemovedAwaitingCommitment {
+ claimed_value_msat += htlc.amount_msat;
+ false
+ } else { true }
+ });
+ self.value_to_self_msat += claimed_value_msat;
self.cur_local_commitment_transaction_number -= 1;
+ let our_commitment_signed = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
+ // we'll send one right away when we get the revoke_and_ack when we
+ // free_holding_cell_htlcs().
+ Some(self.send_commitment_no_status_check()?)
+ } else { None };
+
Ok((msgs::RevokeAndACK {
channel_id: self.channel_id,
per_commitment_secret: per_commitment_secret,
next_per_commitment_point: next_per_commitment_point,
- }, to_forward_infos))
+ }, our_commitment_signed))
+ }
+
+ /// Used to fulfill holding_cell_htlcs when we get a remote ack (or implicitly get it by them
+ /// fulfilling or failing the last pending HTLC)
+ fn free_holding_cell_htlcs(&mut self) -> Result<Option<msgs::CommitmentUpdate>, HandleError> {
+ if self.holding_cell_htlc_updates.len() != 0 {
+ let mut htlc_updates = Vec::new();
+ mem::swap(&mut htlc_updates, &mut self.holding_cell_htlc_updates);
+ let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut err = None;
+ for htlc_update in htlc_updates.drain(..) {
+ // Note that this *can* fail, though it should be due to rather-rare conditions on
+ // fee races with adding too many outputs which push our total payments just over
+ // the limit. In case it's less rare than I anticipate, we may want to revisit
+ // handling this case better and maybe fulfilling some of the HTLCs while attempting
+ // to rebalance channels.
+ if err.is_some() { // We're back to AwaitingRemoteRevoke (or are about to fail the channel)
+ self.holding_cell_htlc_updates.push(htlc_update);
+ } else {
+ match &htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, payment_hash, ref onion_routing_packet, ..} => {
+ match self.send_htlc(amount_msat, payment_hash, cltv_expiry, onion_routing_packet.clone()) {
+ Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
+ Err(e) => {
+ err = Some(e);
+ }
+ }
+ },
+ &HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage, .. } => {
+ match self.get_update_fulfill_htlc(payment_preimage) {
+ Ok(update_fulfill_msg_option) => update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap()),
+ Err(e) => {
+ err = Some(e);
+ }
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { payment_hash, ref err_packet } => {
+ match self.get_update_fail_htlc(&payment_hash, err_packet.clone()) {
+ Ok(update_fail_msg_option) => update_fail_htlcs.push(update_fail_msg_option.unwrap()),
+ Err(e) => {
+ err = Some(e);
+ }
+ }
+ },
+ }
+ if err.is_some() {
+ self.holding_cell_htlc_updates.push(htlc_update);
+ }
+ }
+ }
+ //TODO: Need to examine the type of err - if it's a fee issue or similar we may want to
+ //fail it back along the route, if it's a temporary issue we can ignore it...
+ match err {
+ None => {
+ Ok(Some(msgs::CommitmentUpdate {
+ update_add_htlcs,
+ update_fulfill_htlcs,
+ update_fail_htlcs,
+ commitment_signed: self.send_commitment_no_status_check()?
+ }))
+ },
+ Some(e) => Err(e)
+ }
+ } else {
+ Ok(None)
+ }
}
/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK) -> Result<Option<(Vec<msgs::UpdateAddHTLC>, msgs::CommitmentSigned)>, HandleError> {
+ pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK) -> Result<(Option<msgs::CommitmentUpdate>, Vec<PendingForwardHTLCInfo>, Vec<([u8; 32], HTLCFailReason)>), HandleError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(HandleError{err: "Got revoke/ACK message when channel was not in an operational state", msg: None});
}
- if PublicKey::from_secret_key(&self.secp_ctx, &secp_call!(SecretKey::from_slice(&self.secp_ctx, &msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret")).unwrap() != self.their_cur_commitment_point {
- return Err(HandleError{err: "Got a revoke commitment secret which didn't correspond to their current pubkey", msg: None});
+ if let Some(their_prev_commitment_point) = self.their_prev_commitment_point {
+ if PublicKey::from_secret_key(&self.secp_ctx, &secp_call!(SecretKey::from_slice(&self.secp_ctx, &msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret")).unwrap() != their_prev_commitment_point {
+ return Err(HandleError{err: "Got a revoke commitment secret which didn't correspond to their current pubkey", msg: None});
+ }
}
- self.channel_monitor.provide_secret(self.cur_remote_commitment_transaction_number, msg.per_commitment_secret)?;
+ self.channel_monitor.provide_secret(self.cur_remote_commitment_transaction_number + 1, msg.per_commitment_secret)?;
// Update state now that we've passed all the can-fail calls...
// (note that we may still fail to generate the new commitment_signed message, but that's
// OK, we step the channel here and *then* if the new generation fails we can fail the
// channel based on that, but stepping stuff here should be safe either way.
self.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+ self.their_prev_commitment_point = Some(self.their_cur_commitment_point);
self.their_cur_commitment_point = msg.next_per_commitment_point;
self.cur_remote_commitment_transaction_number -= 1;
+
+ let mut to_forward_infos = Vec::new();
+ let mut revoked_htlcs = Vec::new();
+ let mut require_commitment = false;
+ let mut value_to_self_msat_diff: i64 = 0;
+ // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
+ self.pending_htlcs.retain(|htlc| {
+ if htlc.state == HTLCState::LocalRemoved {
+ if htlc.local_removed_fulfilled { true } else { false }
+ } else if htlc.state == HTLCState::AwaitingRemovedRemoteRevoke {
+ if let Some(reason) = htlc.fail_reason.clone() { // We really want take() here, but, again, non-mut ref :(
+ revoked_htlcs.push((htlc.payment_hash, reason));
+ } else {
+ // They fulfilled, so we sent them money
+ value_to_self_msat_diff -= htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
for htlc in self.pending_htlcs.iter_mut() {
if htlc.state == HTLCState::LocalAnnounced {
htlc.state = HTLCState::Committed;
+ } else if htlc.state == HTLCState::AwaitingRemoteRevokeToAnnounce {
+ htlc.state = HTLCState::AwaitingAnnouncedRemoteRevoke;
+ require_commitment = true;
+ } else if htlc.state == HTLCState::AwaitingAnnouncedRemoteRevoke {
+ htlc.state = HTLCState::Committed;
+ to_forward_infos.push(htlc.pending_forward_state.take().unwrap());
+ } else if htlc.state == HTLCState::AwaitingRemoteRevokeToRemove {
+ htlc.state = HTLCState::AwaitingRemovedRemoteRevoke;
+ require_commitment = true;
+ } else if htlc.state == HTLCState::LocalRemoved {
+ assert!(htlc.local_removed_fulfilled);
+ htlc.state = HTLCState::LocalRemovedAwaitingCommitment;
}
}
+ self.value_to_self_msat = (self.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
- self.free_holding_cell_htlcs()
+ match self.free_holding_cell_htlcs()? {
+ Some(commitment_update) => {
+ Ok((Some(commitment_update), to_forward_infos, revoked_htlcs))
+ },
+ None => {
+ if require_commitment {
+ Ok((Some(msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ commitment_signed: self.send_commitment_no_status_check()?
+ }), to_forward_infos, revoked_htlcs))
+ } else {
+ Ok((None, to_forward_infos, revoked_htlcs))
+ }
+ }
+ }
}
pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
- if self.channel_outbound {
+ if self.channel_outbound {
return Err(HandleError{err: "Non-funding remote tried to update channel fee", msg: None});
- }
+ }
Channel::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
self.feerate_per_kw = msg.feerate_per_kw as u64;
Ok(())
// We can't send our shutdown until we've committed all of our pending HTLCs, but the
// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
// cell HTLCs and return them to fail the payment.
- let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlcs.len());
- for htlc in self.holding_cell_htlcs.drain(..) {
- dropped_outbound_htlcs.push(htlc.payment_hash);
- }
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+ self.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, .. } => {
+ dropped_outbound_htlcs.push(payment_hash.clone());
+ false
+ },
+ _ => true
+ }
+ });
for htlc in self.pending_htlcs.iter() {
if htlc.state == HTLCState::LocalAnnounced {
return Ok((None, None, dropped_outbound_htlcs));
self.channel_state = ChannelState::FundingCreated as u32;
let funding_txo = self.channel_monitor.get_funding_txo().unwrap();
self.channel_id = funding_txo.0.into_be() ^ Uint256::from_u64(funding_txo.1 as u64).unwrap(); //TODO: or le?
+ self.cur_remote_commitment_transaction_number -= 1;
Ok(msgs::FundingCreated {
temporary_channel_id: temporary_channel_id,
return Err(HandleError{err: "Cannot send less than their minimum HTLC value", msg: None});
}
- let (_, outbound_htlc_count, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats();
+ let (_, outbound_htlc_count, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats(false);
if outbound_htlc_count + 1 > self.their_max_accepted_htlcs as u32 {
return Err(HandleError{err: "Cannot push more than their max accepted HTLCs", msg: None});
}
// Now update local state:
if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
//TODO: Check the limits *including* other pending holding cell HTLCs!
- self.holding_cell_htlcs.push(HTLCOutputAwaitingACK {
+ self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
amount_msat: amount_msat,
payment_hash: payment_hash,
cltv_expiry: cltv_expiry,
payment_hash: payment_hash.clone(),
cltv_expiry: cltv_expiry,
state: HTLCState::LocalAnnounced,
+ fail_reason: None,
+ local_removed_fulfilled: false,
pending_forward_state: None
});
if !have_updates {
return Err(HandleError{err: "Cannot create commitment tx until we have some updates to send", msg: None});
}
-
+ self.send_commitment_no_status_check()
+ }
+ /// Only fails in case of bad keys
+ fn send_commitment_no_status_check(&mut self) -> Result<msgs::CommitmentSigned, HandleError> {
let funding_script = self.get_funding_redeemscript();
+ // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
+ // fail to generate this, we still are at least at a position where upgrading their status
+ // is acceptable.
+ for htlc in self.pending_htlcs.iter_mut() {
+ if htlc.state == HTLCState::AwaitingRemoteRevokeToAnnounce {
+ htlc.state = HTLCState::AwaitingAnnouncedRemoteRevoke;
+ } else if htlc.state == HTLCState::AwaitingRemoteRevokeToRemove {
+ htlc.state = HTLCState::AwaitingRemovedRemoteRevoke;
+ }
+ }
+
let remote_keys = self.build_remote_transaction_keys()?;
let remote_commitment_tx = self.build_commitment_transaction(self.cur_remote_commitment_transaction_number, &remote_keys, false, true);
let remote_commitment_txid = remote_commitment_tx.0.txid();
let mut htlc_sigs = Vec::new();
for ref htlc in remote_commitment_tx.1.iter() {
- let htlc_tx = self.build_htlc_transaction(&remote_commitment_txid, htlc, false, &remote_keys)?;
+ let htlc_tx = self.build_htlc_transaction(&remote_commitment_txid, htlc, false, &remote_keys);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &remote_keys);
let htlc_sighash = Message::from_slice(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]).unwrap();
let our_htlc_key = secp_derived_key!(chan_utils::derive_private_key(&self.secp_ctx, &remote_keys.per_commitment_point, &self.local_keys.htlc_base_key));
pub fn send_htlc_and_commit(&mut self, amount_msat: u64, payment_hash: [u8; 32], cltv_expiry: u32, onion_routing_packet: msgs::OnionPacket) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned)>, HandleError> {
match self.send_htlc(amount_msat, payment_hash, cltv_expiry, onion_routing_packet)? {
Some(update_add_htlc) =>
- Ok(Some((update_add_htlc, self.send_commitment()?))),
+ Ok(Some((update_add_htlc, self.send_commitment_no_status_check()?))),
None => Ok(None)
}
}
// We can't send our shutdown until we've committed all of our pending HTLCs, but the
// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
// cell HTLCs and return them to fail the payment.
- let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlcs.len());
- for htlc in self.holding_cell_htlcs.drain(..) {
- dropped_outbound_htlcs.push(htlc.payment_hash);
- }
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+ self.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, .. } => {
+ dropped_outbound_htlcs.push(payment_hash.clone());
+ false
+ },
+ _ => true
+ }
+ });
Ok((msgs::Shutdown {
channel_id: self.channel_id,
let remote_signature = Signature::from_der(&secp_ctx, &hex_bytes($their_sig_hex).unwrap()[..]).unwrap();
let ref htlc = unsigned_tx.1[$htlc_idx];
- let mut htlc_tx = chan.build_htlc_transaction(&unsigned_tx.0.txid(), &htlc, true, &keys).unwrap();
+ let mut htlc_tx = chan.build_htlc_transaction(&unsigned_tx.0.txid(), &htlc, true, &keys);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys);
let htlc_sighash = Message::from_slice(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]).unwrap();
secp_ctx.verify(&htlc_sighash, &remote_signature, &keys.b_htlc_key).unwrap();
cltv_expiry: 500,
payment_hash: [0; 32],
state: HTLCState::Committed,
+ fail_reason: None,
+ local_removed_fulfilled: false,
pending_forward_state: None,
};
let mut sha = Sha256::new();
cltv_expiry: 501,
payment_hash: [0; 32],
state: HTLCState::Committed,
+ fail_reason: None,
+ local_removed_fulfilled: false,
pending_forward_state: None,
};
let mut sha = Sha256::new();
cltv_expiry: 502,
payment_hash: [0; 32],
state: HTLCState::Committed,
+ fail_reason: None,
+ local_removed_fulfilled: false,
pending_forward_state: None,
};
let mut sha = Sha256::new();
cltv_expiry: 503,
payment_hash: [0; 32],
state: HTLCState::Committed,
+ fail_reason: None,
+ local_removed_fulfilled: false,
pending_forward_state: None,
};
let mut sha = Sha256::new();
cltv_expiry: 504,
payment_hash: [0; 32],
state: HTLCState::Committed,
+ fail_reason: None,
+ local_removed_fulfilled: false,
pending_forward_state: None,
};
let mut sha = Sha256::new();
use std::{ptr, mem};
use std::time::{Instant,Duration};
-/// Stores the info we will need to send when we want to forward an HTLC onwards
-pub struct PendingForwardHTLCInfo {
- onion_packet: Option<msgs::OnionPacket>,
- payment_hash: [u8; 32],
- short_channel_id: u64,
- prev_short_channel_id: u64,
- amt_to_forward: u64,
- outgoing_cltv_value: u32,
-}
+mod channel_held_info {
+ use ln::msgs;
-#[cfg(feature = "fuzztarget")]
-impl PendingForwardHTLCInfo {
- pub fn dummy() -> Self {
- Self {
- onion_packet: None,
- payment_hash: [0; 32],
- short_channel_id: 0,
- prev_short_channel_id: 0,
- amt_to_forward: 0,
- outgoing_cltv_value: 0,
+ /// Stores the info we will need to send when we want to forward an HTLC onwards
+ pub struct PendingForwardHTLCInfo {
+ pub(super) onion_packet: Option<msgs::OnionPacket>,
+ pub(super) payment_hash: [u8; 32],
+ pub(super) short_channel_id: u64,
+ pub(super) prev_short_channel_id: u64,
+ pub(super) amt_to_forward: u64,
+ pub(super) outgoing_cltv_value: u32,
+ }
+
+ #[cfg(feature = "fuzztarget")]
+ impl PendingForwardHTLCInfo {
+ pub fn dummy() -> Self {
+ Self {
+ onion_packet: None,
+ payment_hash: [0; 32],
+ short_channel_id: 0,
+ prev_short_channel_id: 0,
+ amt_to_forward: 0,
+ outgoing_cltv_value: 0,
+ }
+ }
+ }
+
+ #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+ pub enum HTLCFailReason {
+ ErrorPacket {
+ err: msgs::OnionErrorPacket,
+ },
+ Reason {
+ failure_code: u16,
+ data: Vec<u8>,
+ }
+ }
+
+ #[cfg(feature = "fuzztarget")]
+ impl HTLCFailReason {
+ pub fn dummy() -> Self {
+ HTLCFailReason::Reason {
+ failure_code: 0, data: Vec::new(),
+ }
}
}
}
+#[cfg(feature = "fuzztarget")]
+pub use self::channel_held_info::*;
+#[cfg(not(feature = "fuzztarget"))]
+pub(crate) use self::channel_held_info::*;
enum PendingOutboundHTLC {
IntermediaryHopData {
}
}
-enum HTLCFailReason<'a> {
- ErrorPacket {
- err: &'a msgs::OnionErrorPacket,
- },
- Reason {
- failure_code: u16,
- data: &'a[u8],
- }
-}
-
/// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
/// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
/// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
};
for payment_hash in res.1 {
// unknown_next_peer...I dunno who that is anymore....
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: &[0; 0] });
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
}
Ok(res.0)
}
res
}
+ #[inline]
fn gen_ammag_from_shared_secret(shared_secret: &SharedSecret) -> [u8; 32] {
let mut hmac = Hmac::new(Sha256::new(), &[0x61, 0x6d, 0x6d, 0x61, 0x67]); // ammag
hmac.input(&shared_secret[..]);
packet
}
+ #[inline]
fn build_first_hop_failure_packet(shared_secret: &SharedSecret, failure_type: u16, failure_data: &[u8]) -> msgs::OnionErrorPacket {
let failure_packet = ChannelManager::build_failure_packet(shared_secret, failure_type, failure_data);
ChannelManager::encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
for failed_forward in failed_forwards.drain(..) {
match failed_forward.2 {
- None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: &[0;0] }),
- Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: &chan_update.encode()[..] }),
+ None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: Vec::new() }),
+ Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: chan_update.encode() }),
};
}
/// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event.
pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> bool {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: &[0;0] })
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: Vec::new() })
}
fn fail_htlc_backwards_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, payment_hash: &[u8; 32], onion_error: HTLCFailReason) -> bool {
PendingOutboundHTLC::IntermediaryHopData { source_short_channel_id, incoming_packet_shared_secret } => {
let err_packet = match onion_error {
HTLCFailReason::Reason { failure_code, data } => {
- let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, data).encode();
+ let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
},
HTLCFailReason::ErrorPacket { err } => {
}
};
- let (node_id, fail_msg) = {
+ let (node_id, fail_msgs) = {
let chan_id = match channel_state.short_to_id.get(&source_short_channel_id) {
Some(chan_id) => chan_id.clone(),
None => return false
};
let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
- match chan.get_update_fail_htlc(payment_hash, err_packet) {
+ match chan.get_update_fail_htlc_and_commit(payment_hash, err_packet) {
Ok(msg) => (chan.get_their_node_id(), msg),
Err(_e) => {
//TODO: Do something with e?
}
};
- mem::drop(channel_state);
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::SendFailHTLC {
- node_id,
- msg: fail_msg
- });
+ match fail_msgs {
+ Some(msgs) => {
+ mem::drop(channel_state);
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::SendFailHTLC {
+ node_id,
+ msg: msgs.0,
+ commitment_msg: msgs.1,
+ });
+ },
+ None => {},
+ }
true
},
/// Provides a payment preimage in response to a PaymentReceived event, returning true and
/// generating message events for the net layer to claim the payment, if possible. Thus, you
/// should probably kick the net layer to go send messages if this returns true!
+ /// May panic if called other than in response to a PaymentReceived event.
pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool {
self.claim_funds_internal(payment_preimage, true)
}
false
},
PendingOutboundHTLC::IntermediaryHopData { source_short_channel_id, .. } => {
- let (node_id, fulfill_msg) = {
+ let (node_id, fulfill_msgs, monitor) = {
let chan_id = match channel_state.short_to_id.get(&source_short_channel_id) {
Some(chan_id) => chan_id.clone(),
None => return false
};
let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
- match chan.get_update_fulfill_htlc(payment_preimage) {
- Ok(msg) => (chan.get_their_node_id(), msg),
+ match chan.get_update_fulfill_htlc_and_commit(payment_preimage) {
+ Ok(msg) => (chan.get_their_node_id(), msg, if from_user { Some(chan.channel_monitor()) } else { None }),
Err(_e) => {
//TODO: Do something with e?
return false;
};
mem::drop(channel_state);
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::SendFulfillHTLC {
- node_id: node_id,
- msg: fulfill_msg
- });
+ match fulfill_msgs {
+ Some(msgs) => {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::SendFulfillHTLC {
+ node_id: node_id,
+ msg: msgs.0,
+ commitment_msg: msgs.1,
+ });
+ },
+ None => {},
+ }
- true
+ //TODO: It may not be possible to handle add_update_monitor failures gracefully, maybe
+ //it should return no Err? Sadly, panic!()s instead doesn't help much :(
+ if from_user {
+ match self.monitor.add_update_monitor(monitor.as_ref().unwrap().get_funding_txo().unwrap(), monitor.unwrap()) {
+ Ok(()) => true,
+ Err(_) => true,
+ }
+ } else { true }
},
}
}
};
for payment_hash in res.2 {
// unknown_next_peer...I dunno who that is anymore....
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: &[0; 0] });
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
}
Ok((res.0, res.1))
}
}
fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
- {
+ //TODO: Delay the claimed_funds relaying just like we do outbound relay!
+ // Claim funds first, cause we don't really care if the channel we received the message on
+ // is broken, we may have enough info to get our own money!
+ self.claim_funds_internal(msg.payment_preimage.clone(), false);
+
+ let monitor = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
}
chan.update_fulfill_htlc(&msg)?;
+ chan.channel_monitor()
},
None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
}
- }
- //TODO: Delay the claimed_funds relaying just like we do outbound relay!
- self.claim_funds_internal(msg.payment_preimage.clone(), false);
+ };
+ self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
Ok(())
}
fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
let mut channel_state = self.channel_state.lock().unwrap();
- let payment_hash = match channel_state.by_id.get_mut(&msg.channel_id) {
+ match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
}
- chan.update_fail_htlc(&msg)?
+ chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() })
},
None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
- };
- self.fail_htlc_backwards_internal(channel_state, &payment_hash, HTLCFailReason::ErrorPacket { err: &msg.reason });
- Ok(())
+ }
}
fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
let mut channel_state = self.channel_state.lock().unwrap();
- let payment_hash = match channel_state.by_id.get_mut(&msg.channel_id) {
+ match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
}
- chan.update_fail_malformed_htlc(&msg)?
+ chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() })
},
None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
- };
- self.fail_htlc_backwards_internal(channel_state, &payment_hash, HTLCFailReason::Reason { failure_code: msg.failure_code, data: &[0;0] });
- Ok(())
+ }
}
- fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<msgs::RevokeAndACK, HandleError> {
- let mut forward_event = None;
+ fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), HandleError> {
let (res, monitor) = {
let mut channel_state = self.channel_state.lock().unwrap();
-
- let ((res, mut forwarding_infos), monitor) = match channel_state.by_id.get_mut(&msg.channel_id) {
+ match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
(chan.commitment_signed(&msg)?, chan.channel_monitor())
},
None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
- };
+ }
+ };
+ //TODO: Only if we store HTLC sigs
+ self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
+
+ Ok(res)
+ }
+ fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, HandleError> {
+ let ((res, mut pending_forwards, mut pending_failures), monitor) = {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ match channel_state.by_id.get_mut(&msg.channel_id) {
+ Some(chan) => {
+ if chan.get_their_node_id() != *their_node_id {
+ return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
+ }
+ (chan.revoke_and_ack(&msg)?, chan.channel_monitor())
+ },
+ None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
+ }
+ };
+ self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
+ for failure in pending_failures.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failure.0, failure.1);
+ }
+
+ let mut forward_event = None;
+ if !pending_forwards.is_empty() {
+ let mut channel_state = self.channel_state.lock().unwrap();
if channel_state.forward_htlcs.is_empty() {
forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
channel_state.next_forward = forward_event.unwrap();
}
- for forward_info in forwarding_infos.drain(..) {
+ for forward_info in pending_forwards.drain(..) {
match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(forward_info);
}
}
}
-
- (res, monitor)
- };
- //TODO: Only if we store HTLC sigs
- self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
-
+ }
match forward_event {
Some(time) => {
let mut pending_events = self.pending_events.lock().unwrap();
Ok(res)
}
- fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<(Vec<msgs::UpdateAddHTLC>, msgs::CommitmentSigned)>, HandleError> {
- let (res, monitor) = {
- let mut channel_state = self.channel_state.lock().unwrap();
- match channel_state.by_id.get_mut(&msg.channel_id) {
- Some(chan) => {
- if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
- }
- (chan.revoke_and_ack(&msg)?, chan.channel_monitor())
- },
- None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
- }
- };
- self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
- Ok(res)
- }
-
fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
use rand::{thread_rng,Rng};
- use std::sync::{Arc, Mutex};
+ use std::collections::HashMap;
use std::default::Default;
+ use std::sync::{Arc, Mutex};
use std::time::Instant;
fn build_test_onion_keys() -> Vec<OnionKeys> {
assert_eq!(onion_packet_5.data, hex_bytes("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap());
}
- static mut CHAN_COUNT: u16 = 0;
- fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction) {
+ fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) {
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
- let chan_id = unsafe { CHAN_COUNT };
- chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id as u32; 1]);
+ chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]);
for i in 2..100 {
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
chain.block_connected_checked(&header, i, &[tx; 0], &[0; 0]);
}
}
- fn create_chan_between_nodes(node_a: &ChannelManager, chain_a: &chaininterface::ChainWatchInterfaceUtil, node_b: &ChannelManager, chain_b: &chaininterface::ChainWatchInterfaceUtil) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, Uint256) {
- let open_chan = node_a.create_channel(node_b.get_our_node_id(), 100000, 42).unwrap();
- let accept_chan = node_b.handle_open_channel(&node_a.get_our_node_id(), &open_chan).unwrap();
- node_a.handle_accept_channel(&node_b.get_our_node_id(), &accept_chan).unwrap();
+ struct Node {
+ feeest: Arc<test_utils::TestFeeEstimator>,
+ chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
+ tx_broadcaster: Arc<test_utils::TestBroadcaster>,
+ chan_monitor: Arc<test_utils::TestChannelMonitor>,
+ node_id: SecretKey,
+ node: Arc<ChannelManager>,
+ router: Router,
+ }
+
+ static mut CHAN_COUNT: u32 = 0;
+ fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, Uint256, Transaction) {
+ let open_chan = node_a.node.create_channel(node_b.node.get_our_node_id(), 100000, 42).unwrap();
+ let accept_chan = node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), &open_chan).unwrap();
+ node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_chan).unwrap();
let chan_id = unsafe { CHAN_COUNT };
let tx;
let funding_output;
- let events_1 = node_a.get_and_clear_pending_events();
+ let events_1 = node_a.node.get_and_clear_pending_events();
assert_eq!(events_1.len(), 1);
match events_1[0] {
Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
}]};
funding_output = (Sha256dHash::from_data(&serialize(&tx).unwrap()[..]), 0);
- node_a.funding_transaction_generated(&temporary_channel_id, funding_output.clone());
- //TODO: Check that we got added to chan_monitor_a!
+ node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output.clone());
+ let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ assert_eq!(added_monitors[0].0, funding_output);
+ added_monitors.clear();
},
_ => panic!("Unexpected event"),
}
- let events_2 = node_a.get_and_clear_pending_events();
+ let events_2 = node_a.node.get_and_clear_pending_events();
assert_eq!(events_2.len(), 1);
let funding_signed = match events_2[0] {
Event::SendFundingCreated { ref node_id, ref msg } => {
- assert_eq!(*node_id, node_b.get_our_node_id());
- node_b.handle_funding_created(&node_a.get_our_node_id(), msg).unwrap()
- //TODO: Check that we got added to chan_monitor_b!
+ assert_eq!(*node_id, node_b.node.get_our_node_id());
+ let res = node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), msg).unwrap();
+ let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ assert_eq!(added_monitors[0].0, funding_output);
+ added_monitors.clear();
+ res
},
_ => panic!("Unexpected event"),
};
- node_a.handle_funding_signed(&node_b.get_our_node_id(), &funding_signed).unwrap();
+ node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &funding_signed).unwrap();
- let events_3 = node_a.get_and_clear_pending_events();
+ let events_3 = node_a.node.get_and_clear_pending_events();
assert_eq!(events_3.len(), 1);
match events_3[0] {
Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
_ => panic!("Unexpected event"),
};
- confirm_transaction(&chain_a, &tx);
- let events_4 = node_a.get_and_clear_pending_events();
+ confirm_transaction(&node_a.chain_monitor, &tx, chan_id);
+ let events_4 = node_a.node.get_and_clear_pending_events();
assert_eq!(events_4.len(), 1);
match events_4[0] {
Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
- assert_eq!(*node_id, node_b.get_our_node_id());
+ assert_eq!(*node_id, node_b.node.get_our_node_id());
assert!(announcement_sigs.is_none());
- node_b.handle_funding_locked(&node_a.get_our_node_id(), msg).unwrap()
+ node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), msg).unwrap()
},
_ => panic!("Unexpected event"),
};
let channel_id;
- confirm_transaction(&chain_b, &tx);
- let events_5 = node_b.get_and_clear_pending_events();
+ confirm_transaction(&node_b.chain_monitor, &tx, chan_id);
+ let events_5 = node_b.node.get_and_clear_pending_events();
assert_eq!(events_5.len(), 1);
let as_announcement_sigs = match events_5[0] {
Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
- assert_eq!(*node_id, node_a.get_our_node_id());
+ assert_eq!(*node_id, node_a.node.get_our_node_id());
channel_id = msg.channel_id.clone();
- let as_announcement_sigs = node_a.handle_funding_locked(&node_b.get_our_node_id(), msg).unwrap().unwrap();
- node_a.handle_announcement_signatures(&node_b.get_our_node_id(), &(*announcement_sigs).clone().unwrap()).unwrap();
+ let as_announcement_sigs = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap().unwrap();
+ node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &(*announcement_sigs).clone().unwrap()).unwrap();
as_announcement_sigs
},
_ => panic!("Unexpected event"),
};
- let events_6 = node_a.get_and_clear_pending_events();
+ let events_6 = node_a.node.get_and_clear_pending_events();
assert_eq!(events_6.len(), 1);
let (announcement, as_update) = match events_6[0] {
Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
_ => panic!("Unexpected event"),
};
- node_b.handle_announcement_signatures(&node_a.get_our_node_id(), &as_announcement_sigs).unwrap();
- let events_7 = node_b.get_and_clear_pending_events();
+ node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_announcement_sigs).unwrap();
+ let events_7 = node_b.node.get_and_clear_pending_events();
assert_eq!(events_7.len(), 1);
let bs_update = match events_7[0] {
Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
CHAN_COUNT += 1;
}
- ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone(), channel_id)
+ ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone(), channel_id, tx)
}
- fn close_channel(outbound_node: &ChannelManager, outbound_broadcaster: &test_utils::TestBroadcaster, inbound_node: &ChannelManager, inbound_broadcaster: &test_utils::TestBroadcaster, channel_id: &Uint256, close_inbound_first: bool) {
- let (node_a, broadcaster_a) = if close_inbound_first { (inbound_node, inbound_broadcaster) } else { (outbound_node, outbound_broadcaster) };
- let (node_b, broadcaster_b) = if close_inbound_first { (outbound_node, outbound_broadcaster) } else { (inbound_node, inbound_broadcaster) };
+ fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Uint256, Transaction) {
+ let chan_announcement = create_chan_between_nodes(&nodes[a], &nodes[b]);
+ for node in nodes {
+ assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
+ node.router.handle_channel_update(&chan_announcement.1).unwrap();
+ node.router.handle_channel_update(&chan_announcement.2).unwrap();
+ }
+ (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
+ }
+
+ fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &Uint256, funding_tx: Transaction, close_inbound_first: bool) {
+ let (node_a, broadcaster_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster) } else { (&outbound_node.node, &outbound_node.tx_broadcaster) };
+ let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
let (tx_a, tx_b);
let shutdown_a = node_a.close_channel(channel_id).unwrap();
tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
}
assert_eq!(tx_a, tx_b);
+ let mut funding_tx_map = HashMap::new();
+ funding_tx_map.insert(funding_tx.txid(), funding_tx);
+ tx_a.verify(&funding_tx_map).unwrap();
}
struct SendEvent {
}
static mut PAYMENT_COUNT: u8 = 0;
- fn send_along_route(origin_node: &ChannelManager, route: Route, expected_route: &[&ChannelManager], recv_value: u64) -> ([u8; 32], [u8; 32]) {
+ fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
let our_payment_preimage = unsafe { [PAYMENT_COUNT; 32] };
unsafe { PAYMENT_COUNT += 1 };
let our_payment_hash = {
};
let mut payment_event = {
- let msgs = origin_node.send_payment(route, our_payment_hash).unwrap().unwrap();
+ let msgs = origin_node.node.send_payment(route, our_payment_hash).unwrap().unwrap();
SendEvent {
- node_id: expected_route[0].get_our_node_id(),
+ node_id: expected_route[0].node.get_our_node_id(),
msgs: vec!(msgs.0),
commitment_msg: msgs.1,
}
};
let mut prev_node = origin_node;
- for (idx, node) in expected_route.iter().enumerate() {
- assert_eq!(node.get_our_node_id(), payment_event.node_id);
+ for (idx, &node) in expected_route.iter().enumerate() {
+ assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
- node.handle_update_add_htlc(&prev_node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
- let revoke_and_ack = node.handle_commitment_signed(&prev_node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
- assert!(prev_node.handle_revoke_and_ack(&node.get_our_node_id(), &revoke_and_ack).unwrap().is_none());
+ node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ {
+ let added_monitors = node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 0);
+ }
+
+ let revoke_and_ack = node.node.handle_commitment_signed(&prev_node.node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ {
+ let mut added_monitors = node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ added_monitors.clear();
+ }
+ assert!(prev_node.node.handle_revoke_and_ack(&node.node.get_our_node_id(), &revoke_and_ack.0).unwrap().is_none());
+ let prev_revoke_and_ack = prev_node.node.handle_commitment_signed(&node.node.get_our_node_id(), &revoke_and_ack.1.unwrap()).unwrap();
+ {
+ let mut added_monitors = prev_node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 2);
+ added_monitors.clear();
+ }
+ assert!(node.node.handle_revoke_and_ack(&prev_node.node.get_our_node_id(), &prev_revoke_and_ack.0).unwrap().is_none());
+ assert!(prev_revoke_and_ack.1.is_none());
+ {
+ let mut added_monitors = node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ added_monitors.clear();
+ }
- let events_1 = node.get_and_clear_pending_events();
+ let events_1 = node.node.get_and_clear_pending_events();
assert_eq!(events_1.len(), 1);
match events_1[0] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
- node.channel_state.lock().unwrap().next_forward = Instant::now();
- node.process_pending_htlc_forward();
+ node.node.channel_state.lock().unwrap().next_forward = Instant::now();
+ node.node.process_pending_htlc_forward();
- let mut events_2 = node.get_and_clear_pending_events();
+ let mut events_2 = node.node.get_and_clear_pending_events();
assert_eq!(events_2.len(), 1);
if idx == expected_route.len() - 1 {
match events_2[0] {
(our_payment_preimage, our_payment_hash)
}
- fn claim_payment(origin_node: &ChannelManager, expected_route: &[&ChannelManager], our_payment_preimage: [u8; 32]) {
- assert!(expected_route.last().unwrap().claim_funds(our_payment_preimage));
+ fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) {
+ assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage));
+ {
+ let mut added_monitors = expected_route.last().unwrap().chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ added_monitors.clear();
+ }
+
+ let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None;
+ macro_rules! update_fulfill_dance {
+ ($node: expr, $prev_node: expr) => {
+ {
+ $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
+ let revoke_and_commit = $node.node.handle_commitment_signed(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().1).unwrap();
+ {
+ let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 2);
+ added_monitors.clear();
+ }
+ assert!($prev_node.node.handle_revoke_and_ack(&$node.node.get_our_node_id(), &revoke_and_commit.0).unwrap().is_none());
+ let revoke_and_ack = $prev_node.node.handle_commitment_signed(&$node.node.get_our_node_id(), &revoke_and_commit.1.unwrap()).unwrap();
+ assert!(revoke_and_ack.1.is_none());
+ {
+ let mut added_monitors = $prev_node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 2);
+ added_monitors.clear();
+ }
+ assert!($node.node.handle_revoke_and_ack(&$prev_node.node.get_our_node_id(), &revoke_and_ack.0).unwrap().is_none());
+ {
+ let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ added_monitors.clear();
+ }
+ }
+ }
+ }
- let mut expected_next_node = expected_route.last().unwrap().get_our_node_id();
+ let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
let mut prev_node = expected_route.last().unwrap();
- let mut next_msg = None;
for node in expected_route.iter().rev() {
- assert_eq!(expected_next_node, node.get_our_node_id());
- match next_msg {
- Some(msg) => {
- node.handle_update_fulfill_htlc(&prev_node.get_our_node_id(), &msg).unwrap();
- }, None => {}
+ assert_eq!(expected_next_node, node.node.get_our_node_id());
+ if next_msgs.is_some() {
+ update_fulfill_dance!(node, prev_node);
}
- let events = node.get_and_clear_pending_events();
+ let events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::SendFulfillHTLC { ref node_id, ref msg } => {
+ Event::SendFulfillHTLC { ref node_id, ref msg, ref commitment_msg } => {
expected_next_node = node_id.clone();
- next_msg = Some(msg.clone());
+ next_msgs = Some((msg.clone(), commitment_msg.clone()));
},
_ => panic!("Unexpected event"),
};
prev_node = node;
}
- assert_eq!(expected_next_node, origin_node.get_our_node_id());
- origin_node.handle_update_fulfill_htlc(&expected_route.first().unwrap().get_our_node_id(), &next_msg.unwrap()).unwrap();
+ assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
+ update_fulfill_dance!(origin_node, expected_route.first().unwrap());
- let events = origin_node.get_and_clear_pending_events();
+ let events = origin_node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PaymentSent { payment_preimage } => {
}
}
- fn route_payment(origin_node: &ChannelManager, origin_router: &Router, expected_route: &[&ChannelManager], recv_value: u64) -> ([u8; 32], [u8; 32]) {
- let route = origin_router.get_route(&expected_route.last().unwrap().get_our_node_id(), &Vec::new(), recv_value, 142).unwrap();
+ fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
+ let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), &Vec::new(), recv_value, 142).unwrap();
assert_eq!(route.hops.len(), expected_route.len());
for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
- assert_eq!(hop.pubkey, node.get_our_node_id());
+ assert_eq!(hop.pubkey, node.node.get_our_node_id());
}
send_along_route(origin_node, route, expected_route, recv_value)
}
- fn route_over_limit(origin_node: &ChannelManager, origin_router: &Router, expected_route: &[&ChannelManager], recv_value: u64) {
- let route = origin_router.get_route(&expected_route.last().unwrap().get_our_node_id(), &Vec::new(), recv_value, 142).unwrap();
+ fn route_over_limit(origin_node: &Node, expected_route: &[&Node], recv_value: u64) {
+ let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), &Vec::new(), recv_value, 142).unwrap();
assert_eq!(route.hops.len(), expected_route.len());
for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
- assert_eq!(hop.pubkey, node.get_our_node_id());
+ assert_eq!(hop.pubkey, node.node.get_our_node_id());
}
let our_payment_preimage = unsafe { [PAYMENT_COUNT; 32] };
ret
};
- let err = origin_node.send_payment(route, our_payment_hash).err().unwrap();
+ let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
assert_eq!(err.err, "Cannot send value that would put us over our max HTLC value in flight");
}
- fn send_payment(origin_node: &ChannelManager, origin_router: &Router, expected_route: &[&ChannelManager], recv_value: u64) {
- let our_payment_preimage = route_payment(origin_node, origin_router, expected_route, recv_value).0;
- claim_payment(origin_node, expected_route, our_payment_preimage);
+ fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) {
+ let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
+ claim_payment(&origin, expected_route, our_payment_preimage);
}
- fn send_failed_payment(origin_node: &ChannelManager, origin_router: &Router, expected_route: &[&ChannelManager]) {
- let route = origin_router.get_route(&expected_route.last().unwrap().get_our_node_id(), &Vec::new(), 1000000, 142).unwrap();
+ fn send_failed_payment(origin_node: &Node, expected_route: &[&Node]) {
+ let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), &Vec::new(), 1000000, 142).unwrap();
assert_eq!(route.hops.len(), expected_route.len());
for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
- assert_eq!(hop.pubkey, node.get_our_node_id());
+ assert_eq!(hop.pubkey, node.node.get_our_node_id());
}
let our_payment_hash = send_along_route(origin_node, route, expected_route, 1000000).1;
- assert!(expected_route.last().unwrap().fail_htlc_backwards(&our_payment_hash));
+ assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
- let mut expected_next_node = expected_route.last().unwrap().get_our_node_id();
+ let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
+ macro_rules! update_fail_dance {
+ ($node: expr, $prev_node: expr) => {
+ {
+ $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
+ let revoke_and_commit = $node.node.handle_commitment_signed(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().1).unwrap();
+ {
+ let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ added_monitors.clear();
+ }
+ assert!($prev_node.node.handle_revoke_and_ack(&$node.node.get_our_node_id(), &revoke_and_commit.0).unwrap().is_none());
+ let revoke_and_ack = $prev_node.node.handle_commitment_signed(&$node.node.get_our_node_id(), &revoke_and_commit.1.unwrap()).unwrap();
+ assert!(revoke_and_ack.1.is_none());
+ {
+ let mut added_monitors = $prev_node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 2);
+ added_monitors.clear();
+ }
+ assert!($node.node.handle_revoke_and_ack(&$prev_node.node.get_our_node_id(), &revoke_and_ack.0).unwrap().is_none());
+ {
+ let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ added_monitors.clear();
+ }
+ }
+ }
+ }
+
+ let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
let mut prev_node = expected_route.last().unwrap();
- let mut next_msg = None;
for node in expected_route.iter().rev() {
- assert_eq!(expected_next_node, node.get_our_node_id());
- match next_msg {
- Some(msg) => {
- node.handle_update_fail_htlc(&prev_node.get_our_node_id(), &msg).unwrap();
- }, None => {}
+ assert_eq!(expected_next_node, node.node.get_our_node_id());
+ if next_msgs.is_some() {
+ update_fail_dance!(node, prev_node);
}
- let events = node.get_and_clear_pending_events();
+ let events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::SendFailHTLC { ref node_id, ref msg } => {
+ Event::SendFailHTLC { ref node_id, ref msg, ref commitment_msg } => {
expected_next_node = node_id.clone();
- next_msg = Some(msg.clone());
+ next_msgs = Some((msg.clone(), commitment_msg.clone()));
},
_ => panic!("Unexpected event"),
};
prev_node = node;
}
- assert_eq!(expected_next_node, origin_node.get_our_node_id());
- origin_node.handle_update_fail_htlc(&expected_route.first().unwrap().get_our_node_id(), &next_msg.unwrap()).unwrap();
+ assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
+ update_fail_dance!(origin_node, expected_route.first().unwrap());
- let events = origin_node.get_and_clear_pending_events();
+ let events = origin_node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PaymentFailed { payment_hash } => {
}
}
+ fn create_network(node_count: usize) -> Vec<Node> {
+ let mut nodes = Vec::new();
+ let mut rng = thread_rng();
+ let secp_ctx = Secp256k1::new();
+
+ for _ in 0..node_count {
+ let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
+ let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
+ let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
+ let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone()));
+ let node_id = {
+ let mut key_slice = [0; 32];
+ rng.fill_bytes(&mut key_slice);
+ SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
+ };
+ let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone()).unwrap();
+ let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id).unwrap());
+ nodes.push(Node { feeest, chain_monitor, tx_broadcaster, chan_monitor, node_id, node, router });
+ }
+
+ nodes
+ }
+
#[test]
fn fake_network_test() {
// Simple test which builds a network of ChannelManagers, connects them to each other, and
// tests that payments get routed and transactions broadcast in semi-reasonable ways.
- let mut rng = thread_rng();
- let secp_ctx = Secp256k1::new();
-
- let feeest_1 = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
- let chain_monitor_1 = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
- let chan_monitor_1 = Arc::new(test_utils::TestChannelMonitor{});
- let tx_broadcaster_1 = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
- let node_id_1 = {
- let mut key_slice = [0; 32];
- rng.fill_bytes(&mut key_slice);
- SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
- };
- let node_1 = ChannelManager::new(node_id_1.clone(), 0, true, Network::Testnet, feeest_1.clone(), chan_monitor_1.clone(), chain_monitor_1.clone(), tx_broadcaster_1.clone()).unwrap();
- let router_1 = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id_1).unwrap());
-
- let feeest_2 = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
- let chain_monitor_2 = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
- let chan_monitor_2 = Arc::new(test_utils::TestChannelMonitor{});
- let tx_broadcaster_2 = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
- let node_id_2 = {
- let mut key_slice = [0; 32];
- rng.fill_bytes(&mut key_slice);
- SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
- };
- let node_2 = ChannelManager::new(node_id_2.clone(), 0, true, Network::Testnet, feeest_2.clone(), chan_monitor_2.clone(), chain_monitor_2.clone(), tx_broadcaster_2.clone()).unwrap();
- let router_2 = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id_2).unwrap());
-
- let feeest_3 = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
- let chain_monitor_3 = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
- let chan_monitor_3 = Arc::new(test_utils::TestChannelMonitor{});
- let tx_broadcaster_3 = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
- let node_id_3 = {
- let mut key_slice = [0; 32];
- rng.fill_bytes(&mut key_slice);
- SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
- };
- let node_3 = ChannelManager::new(node_id_3.clone(), 0, true, Network::Testnet, feeest_3.clone(), chan_monitor_3.clone(), chain_monitor_3.clone(), tx_broadcaster_3.clone()).unwrap();
- let router_3 = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id_3).unwrap());
-
- let feeest_4 = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
- let chain_monitor_4 = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
- let chan_monitor_4 = Arc::new(test_utils::TestChannelMonitor{});
- let tx_broadcaster_4 = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
- let node_id_4 = {
- let mut key_slice = [0; 32];
- rng.fill_bytes(&mut key_slice);
- SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
- };
- let node_4 = ChannelManager::new(node_id_4.clone(), 0, true, Network::Testnet, feeest_4.clone(), chan_monitor_4.clone(), chain_monitor_4.clone(), tx_broadcaster_4.clone()).unwrap();
- let router_4 = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id_4).unwrap());
+ let nodes = create_network(4);
// Create some initial channels
- let chan_announcement_1 = create_chan_between_nodes(&node_1, &chain_monitor_1, &node_2, &chain_monitor_2);
- for router in vec!(&router_1, &router_2, &router_3, &router_4) {
- assert!(router.handle_channel_announcement(&chan_announcement_1.0).unwrap());
- router.handle_channel_update(&chan_announcement_1.1).unwrap();
- router.handle_channel_update(&chan_announcement_1.2).unwrap();
- }
- let chan_announcement_2 = create_chan_between_nodes(&node_2, &chain_monitor_2, &node_3, &chain_monitor_3);
- for router in vec!(&router_1, &router_2, &router_3, &router_4) {
- assert!(router.handle_channel_announcement(&chan_announcement_2.0).unwrap());
- router.handle_channel_update(&chan_announcement_2.1).unwrap();
- router.handle_channel_update(&chan_announcement_2.2).unwrap();
- }
- let chan_announcement_3 = create_chan_between_nodes(&node_3, &chain_monitor_3, &node_4, &chain_monitor_4);
- for router in vec!(&router_1, &router_2, &router_3, &router_4) {
- assert!(router.handle_channel_announcement(&chan_announcement_3.0).unwrap());
- router.handle_channel_update(&chan_announcement_3.1).unwrap();
- router.handle_channel_update(&chan_announcement_3.2).unwrap();
- }
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+ let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
// Rebalance the network a bit by relaying one payment through all the channels...
- send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
- send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
- send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
- send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
- send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
// Send some more payments
- send_payment(&node_2, &router_2, &vec!(&*node_3, &*node_4)[..], 1000000);
- send_payment(&node_4, &router_4, &vec!(&*node_3, &*node_2, &*node_1)[..], 1000000);
- send_payment(&node_4, &router_4, &vec!(&*node_3, &*node_2)[..], 1000000);
+ send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
+ send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
+ send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
// Test failure packets
- send_failed_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..]);
+ send_failed_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..]);
// Add a new channel that skips 3
- let chan_announcement_4 = create_chan_between_nodes(&node_2, &chain_monitor_2, &node_4, &chain_monitor_4);
- for router in vec!(&router_1, &router_2, &router_3, &router_4) {
- assert!(router.handle_channel_announcement(&chan_announcement_4.0).unwrap());
- router.handle_channel_update(&chan_announcement_4.1).unwrap();
- router.handle_channel_update(&chan_announcement_4.2).unwrap();
- }
+ let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
- send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 1000000);
- send_payment(&node_3, &router_3, &vec!(&*node_4)[..], 1000000);
- send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
- send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
- send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
- send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
- send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
+ send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
// Do some rebalance loop payments, simultaneously
let mut hops = Vec::with_capacity(3);
hops.push(RouteHop {
- pubkey: node_3.get_our_node_id(),
- short_channel_id: chan_announcement_2.1.contents.short_channel_id,
+ pubkey: nodes[2].node.get_our_node_id(),
+ short_channel_id: chan_2.0.contents.short_channel_id,
fee_msat: 0,
- cltv_expiry_delta: chan_announcement_3.1.contents.cltv_expiry_delta as u32
+ cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
});
hops.push(RouteHop {
- pubkey: node_4.get_our_node_id(),
- short_channel_id: chan_announcement_3.1.contents.short_channel_id,
+ pubkey: nodes[3].node.get_our_node_id(),
+ short_channel_id: chan_3.0.contents.short_channel_id,
fee_msat: 0,
- cltv_expiry_delta: chan_announcement_4.2.contents.cltv_expiry_delta as u32
+ cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
});
hops.push(RouteHop {
- pubkey: node_2.get_our_node_id(),
- short_channel_id: chan_announcement_4.1.contents.short_channel_id,
+ pubkey: nodes[1].node.get_our_node_id(),
+ short_channel_id: chan_4.0.contents.short_channel_id,
fee_msat: 1000000,
cltv_expiry_delta: 142,
});
- hops[1].fee_msat = chan_announcement_4.2.contents.fee_base_msat as u64 + chan_announcement_4.2.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
- hops[0].fee_msat = chan_announcement_3.1.contents.fee_base_msat as u64 + chan_announcement_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
- let payment_preimage_1 = send_along_route(&node_2, Route { hops }, &vec!(&*node_3, &*node_4, &*node_2)[..], 1000000).0;
+ hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
+ hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
+ let payment_preimage_1 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
let mut hops = Vec::with_capacity(3);
hops.push(RouteHop {
- pubkey: node_4.get_our_node_id(),
- short_channel_id: chan_announcement_4.1.contents.short_channel_id,
+ pubkey: nodes[3].node.get_our_node_id(),
+ short_channel_id: chan_4.0.contents.short_channel_id,
fee_msat: 0,
- cltv_expiry_delta: chan_announcement_3.2.contents.cltv_expiry_delta as u32
+ cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
});
hops.push(RouteHop {
- pubkey: node_3.get_our_node_id(),
- short_channel_id: chan_announcement_3.1.contents.short_channel_id,
+ pubkey: nodes[2].node.get_our_node_id(),
+ short_channel_id: chan_3.0.contents.short_channel_id,
fee_msat: 0,
- cltv_expiry_delta: chan_announcement_2.2.contents.cltv_expiry_delta as u32
+ cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
});
hops.push(RouteHop {
- pubkey: node_2.get_our_node_id(),
- short_channel_id: chan_announcement_2.1.contents.short_channel_id,
+ pubkey: nodes[1].node.get_our_node_id(),
+ short_channel_id: chan_2.0.contents.short_channel_id,
fee_msat: 1000000,
cltv_expiry_delta: 142,
});
- hops[1].fee_msat = chan_announcement_2.2.contents.fee_base_msat as u64 + chan_announcement_2.2.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
- hops[0].fee_msat = chan_announcement_3.2.contents.fee_base_msat as u64 + chan_announcement_3.2.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
- let payment_preimage_2 = send_along_route(&node_2, Route { hops }, &vec!(&*node_4, &*node_3, &*node_2)[..], 1000000).0;
+ hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
+ hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
+ let payment_preimage_2 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).0;
// Claim the rebalances...
- claim_payment(&node_2, &vec!(&*node_4, &*node_3, &*node_2)[..], payment_preimage_2);
- claim_payment(&node_2, &vec!(&*node_3, &*node_4, &*node_2)[..], payment_preimage_1);
+ claim_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_preimage_2);
+ claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
// Add a duplicate new channel from 2 to 4
- let chan_announcement_5 = create_chan_between_nodes(&node_2, &chain_monitor_2, &node_4, &chain_monitor_4);
- for router in vec!(&router_1, &router_2, &router_3, &router_4) {
- assert!(router.handle_channel_announcement(&chan_announcement_5.0).unwrap());
- router.handle_channel_update(&chan_announcement_5.1).unwrap();
- router.handle_channel_update(&chan_announcement_5.2).unwrap();
- }
+ let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3);
// Send some payments across both channels
- let payment_preimage_3 = route_payment(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 3000000).0;
- let payment_preimage_4 = route_payment(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 3000000).0;
- let payment_preimage_5 = route_payment(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 3000000).0;
+ let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+ let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+ let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
- route_over_limit(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 3000000);
+ route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
//TODO: Test that routes work again here as we've been notified that the channel is full
- claim_payment(&node_1, &vec!(&*node_2, &*node_4)[..], payment_preimage_3);
- claim_payment(&node_1, &vec!(&*node_2, &*node_4)[..], payment_preimage_4);
- claim_payment(&node_1, &vec!(&*node_2, &*node_4)[..], payment_preimage_5);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
// Close down the channels...
- close_channel(&node_1, &tx_broadcaster_1, &node_2, &tx_broadcaster_2, &chan_announcement_1.3, true);
- close_channel(&node_2, &tx_broadcaster_2, &node_3, &tx_broadcaster_3, &chan_announcement_2.3, false);
- close_channel(&node_3, &tx_broadcaster_3, &node_4, &tx_broadcaster_4, &chan_announcement_3.3, true);
- close_channel(&node_2, &tx_broadcaster_2, &node_4, &tx_broadcaster_4, &chan_announcement_4.3, false);
+ close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
+ close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
+ close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
+ close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
+ close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
// Check that we processed all pending events
- for node in vec!(&node_1, &node_2, &node_3, &node_4) {
- assert_eq!(node.get_and_clear_pending_events().len(), 0);
+ for node in nodes {
+ assert_eq!(node.node.get_and_clear_pending_events().len(), 0);
+ assert_eq!(node.chan_monitor.added_monitors.lock().unwrap().len(), 0);
}
}
}