use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError, ChannelKeys};
-use ln::channelmonitor::ManyChannelMonitor;
+use ln::channelmonitor::{ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
use ln::router::{Route,RouteHop};
use ln::msgs;
use ln::msgs::{HandleError,ChannelMessageHandler};
OutboundRoute {
route: Route,
session_priv: SecretKey,
+ /// Technically we can recalculate this from the route, but we cache it here to avoid
+ /// doing a double-pass over the route when we get a failure back
+ first_hop_htlc_msat: u64,
},
}
#[cfg(test)]
HTLCSource::OutboundRoute {
route: Route { hops: Vec::new() },
session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
+ first_hop_htlc_msat: 0,
}
}
}
logger: Arc<Logger>,
}
+/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
+/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
+/// ie the node we forwarded the payment on to should always have enough room to reliably time out
+/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
+/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
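+// If an HTLC's CLTV expiry is more than this many blocks in the future we reject it (error 21,
+// expiry_too_far, below) rather than encumbering our funds for that long.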
+const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
+
+// Check that our CLTV_EXPIRY_DELTA is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie
+// that if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have
+// HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the
+// CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC.
+#[deny(const_err)]
+#[allow(dead_code)]
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER;
+
+// Check for ability of an attacker to make us fail on-chain by delaying an inbound claim. See
+// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
+#[deny(const_err)]
+#[allow(dead_code)]
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
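+
+// Note that the two constants above are never read: evaluating them simply forces the compiler
+// to check the u32 subtractions for underflow. If either inequality is violated, the subtraction
+// underflows and #[deny(const_err)] turns that into a compile-time error, while
+// #[allow(dead_code)] silences the warning for the otherwise-unused results.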
macro_rules! secp_call {
( $res: expr, $err: expr ) => {
let channel_state = self.channel_state.lock().unwrap();
let mut res = Vec::with_capacity(channel_state.by_id.len());
for (channel_id, channel) in channel_state.by_id.iter() {
- if channel.is_usable() {
+ // Note we use is_live here instead of is_usable which leads to somewhat confused
+ // internal/external nomenclature, but that's OK because that's probably what the user
+ // really wanted anyway.
+ if channel.is_live() {
res.push(ChannelDetails {
channel_id: (*channel_id).clone(),
short_channel_id: channel.get_short_channel_id(),
}
};
- //TODO: Check that msg.cltv_expiry is within acceptable bounds!
-
let pending_forward_info = if next_hop_data.hmac == [0; 32] {
// OUR PAYMENT!
- if next_hop_data.data.amt_to_forward != msg.amount_msat {
+ // final_expiry_too_soon
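+ // We want the HTLC to expire no sooner than CLTV_CLAIM_BUFFER blocks out (enough time to
+ // claim it on-chain) plus HTLC_FAIL_TIMEOUT_BLOCKS of slack, so reject anything closer.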
+ if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+ return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
+ }
+ // final_incorrect_htlc_amount
+ if next_hop_data.data.amt_to_forward > msg.amount_msat {
return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
}
+ // final_incorrect_cltv_expiry
if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
}
if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
let forwarding_id = match id_option {
- None => {
+ None => { // unknown_next_peer
return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
},
Some(id) => id.clone(),
};
- if let Some((err, code, chan_update)) = {
+ if let Some((err, code, chan_update)) = loop {
let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
- if !chan.is_live() {
- Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, self.get_channel_update(chan).unwrap()))
- } else {
- let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
- if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward {
- Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, self.get_channel_update(chan).unwrap()))
- } else {
- if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 {
- Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, self.get_channel_update(chan).unwrap()))
- } else {
- None
- }
- }
+
+ if !chan.is_live() { // channel_disabled
+ break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
+ break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+ }
+ let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
+ if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
+ break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+ break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
}
- } {
- return_err!(err, code, &chan_update.encode_with_len()[..]);
+ let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ // We want at least HTLC_FAIL_TIMEOUT_BLOCKS to fail the HTLC backwards before we are
+ // forced to go on-chain, which itself happens CLTV_CLAIM_BUFFER blocks before expiration
+ if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+ break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+ break Some(("CLTV expiry is too far in the future", 21, None));
+ }
+ break None;
+ }
+ {
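+ // Per BOLT 4, the failure data for an UPDATE-type code is the code's extra fields
+ // (the HTLC amount for amount_below_minimum/fee_insufficient, the CLTV value for
+ // incorrect_cltv_expiry) followed by the length-prefixed channel_update, which
+ // encode_with_len produces below.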
+ let mut res = Vec::with_capacity(8 + 128);
+ if code == 0x1000 | 11 || code == 0x1000 | 12 {
+ res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
+ }
+ else if code == 0x1000 | 13 {
+ res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
+ }
+ if let Some(chan_update) = chan_update {
+ res.extend_from_slice(&chan_update.encode_with_len()[..]);
+ }
+ return_err!(err, code, &res[..]);
}
}
}
};
let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
- let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key); //TODO Can we unwrap here?
+ let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key);
Ok(msgs::ChannelUpdate {
signature: sig,
let channel_state = channel_state_lock.borrow_parts();
let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
- None => return Err(APIError::RouteError{err: "No channel available with first hop!"}),
+ None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
Some(id) => id.clone(),
};
return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
}
if !chan.is_live() {
- return Err(APIError::RouteError{err: "Peer for first hop currently disconnected!"});
+ return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
}
chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
route: route.clone(),
session_priv: session_priv.clone(),
- }, onion_packet).map_err(|he| APIError::RouteError{err: he.err})?
+ first_hop_htlc_msat: htlc_msat,
+ }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
};
let first_hop_node_id = route.hops.first().unwrap().pubkey;
/// May panic if the funding_txo is duplicative with some other channel (note that this should
/// be trivially prevented by using unique funding transaction keys per-channel).
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
-
macro_rules! add_pending_event {
($event: expr) => {
{
/// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event.
pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> bool {
+ // TODO: Add ability to return 0x4000|16 (incorrect_payment_amount) if the amount we
+ // received is < expected or > 2*expected
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
if let Some(mut sources) = removed_source {
match source {
HTLCSource::OutboundRoute { .. } => {
mem::drop(channel_state);
-
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PaymentFailed {
- payment_hash: payment_hash.clone()
- });
+ if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error {
+ let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone());
+ let mut pending_events = self.pending_events.lock().unwrap();
+ if let Some(channel_update) = channel_update {
+ pending_events.push(events::Event::PaymentFailureNetworkUpdate {
+ update: channel_update,
+ });
+ }
+ pending_events.push(events::Event::PaymentFailed {
+ payment_hash: payment_hash.clone(),
+ rejected_by_dest: !payment_retryable,
+ });
+ } else {
+ panic!("should have onion error packet here");
+ }
},
HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
let err_packet = match onion_error {
Ok(())
}
- fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, MsgHandleErrInternal> {
+ // Process failure we got back from upstream on a payment we sent. Returns an update to pass
+ // to the route handler and a boolean indicating whether the payment is retryable
+ fn process_onion_failure(&self, htlc_source: &HTLCSource, mut packet_decrypted: Vec<u8>) -> (Option<msgs::HTLCFailChannelUpdate>, bool) {
+ if let &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } = htlc_source {
+ macro_rules! onion_failure_log {
+ ( $error_code_textual: expr, $error_code: expr, $reported_name: expr, $reported_value: expr ) => {
+ log_trace!(self, "{}({:#x}) {}({})", $error_code_textual, $error_code, $reported_name, $reported_value);
+ };
+ ( $error_code_textual: expr, $error_code: expr ) => {
+ log_trace!(self, "{}({})", $error_code_textual, $error_code);
+ };
+ }
+
+ const BADONION: u16 = 0x8000;
+ const PERM: u16 = 0x4000;
+ const UPDATE: u16 = 0x1000;
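+
+ // Per BOLT 4, the high bits of a failure code are flags: BADONION means the onion
+ // was malformed, PERM means retrying the same path is useless, and UPDATE means a
+ // channel_update is enclosed (NODE, 0x2000, is handled via the low-byte match below).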
+
+ let mut res = None;
+ let mut htlc_msat = *first_hop_htlc_msat;
+
+ // Handle packed channel/node updates for passing back to the route handler
+ Self::construct_onion_keys_callback(&self.secp_ctx, route, session_priv, |shared_secret, _, _, route_hop| {
+ if res.is_some() { return; }
+
+ let incoming_htlc_msat = htlc_msat;
+ let amt_to_forward = htlc_msat - route_hop.fee_msat;
+ htlc_msat = amt_to_forward;
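+ // Track the HTLC amount as seen by each hop (each hop forwards what it received
+ // minus its fee) so we can sanity-check the values the erring hop reports back.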
+
+ let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);
+
+ let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
+ decryption_tmp.resize(packet_decrypted.len(), 0);
+ let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
+ chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
+ packet_decrypted = decryption_tmp;
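+
+ // Each hop obfuscated the returning error with its own "ammag" ChaCha20 stream, so
+ // we peel one layer per hop in path order; the hop whose "um"-keyed HMAC verifies
+ // below is the one that generated the failure.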
+
+ let is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey;
+
+ if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
+ let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);
+ let mut hmac = Hmac::new(Sha256::new(), &um);
+ hmac.input(&err_packet.encode()[32..]);
+ let mut calc_tag = [0u8; 32];
+ hmac.raw_result(&mut calc_tag);
+
+ if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) {
+ if err_packet.failuremsg.len() < 2 {
+ // Useless packet that we can't use but it passed HMAC, so it
+ // definitely came from the peer in question
+ res = Some((None, !is_from_final_node));
+ } else {
+ let error_code = byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]);
+
+ match error_code & 0xff {
+ 1|2|3 => {
+ // either from an intermediate or final node
+ // invalid_realm(PERM|1)
+ // temporary_node_failure(NODE|2)
+ // permanent_node_failure(PERM|NODE|2)
+ // required_node_feature_missing(PERM|NODE|3)
+ res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+ node_id: route_hop.pubkey,
+ is_permanent: error_code & PERM == PERM,
+ }), !(error_code & PERM == PERM && is_from_final_node)));
+ // A node returning invalid_realm is removed from the network_map even
+ // though the NODE flag is not set. TODO: or should we only remove the channel?
+ // The payment is retried when the removed node is not the final node.
+ return;
+ },
+ _ => {}
+ }
+
+ if is_from_final_node {
+ let payment_retryable = match error_code {
+ c if c == PERM|15 => false, // unknown_payment_hash
+ c if c == PERM|16 => false, // incorrect_payment_amount
+ 17 => true, // final_expiry_too_soon
+ 18 if err_packet.failuremsg.len() == 6 => { // final_incorrect_cltv_expiry
+ let _reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
+ true
+ },
+ 19 if err_packet.failuremsg.len() == 10 => { // final_incorrect_htlc_amount
+ let _reported_incoming_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
+ true
+ },
+ _ => {
+ // A final node has sent us either an invalid code or an error_code that
+ // MUST be sent from the processing node, or the format of failuremsg
+ // does not conform to the spec.
+ // Remove it from the network map and don't retry the payment
+ res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+ node_id: route_hop.pubkey,
+ is_permanent: true,
+ }), false));
+ return;
+ }
+ };
+ res = Some((None, payment_retryable));
+ return;
+ }
+
+ // now, error_code should be only from the intermediate nodes
+ match error_code {
+ _c if error_code & PERM == PERM => {
+ res = Some((Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
+ short_channel_id: route_hop.short_channel_id,
+ is_permanent: true,
+ }), false));
+ },
+ _c if error_code & UPDATE == UPDATE => {
+ let offset = match error_code {
+ c if c == UPDATE|7 => 0, // temporary_channel_failure
+ c if c == UPDATE|11 => 8, // amount_below_minimum
+ c if c == UPDATE|12 => 8, // fee_insufficient
+ c if c == UPDATE|13 => 4, // incorrect_cltv_expiry
+ c if c == UPDATE|14 => 0, // expiry_too_soon
+ c if c == UPDATE|20 => 2, // channel_disabled
+ _ => {
+ // node sending unknown code
+ res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+ node_id: route_hop.pubkey,
+ is_permanent: true,
+ }), false));
+ return;
+ }
+ };
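+
+ // offset is the number of per-code data bytes that sit between the two-byte
+ // failure code and the u16 update_len in failuremsg.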
+
+ if err_packet.failuremsg.len() >= offset + 2 {
+ let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[offset+2..offset+4]) as usize;
+ if err_packet.failuremsg.len() >= offset + 4 + update_len {
+ if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[offset + 4..offset + 4 + update_len])) {
+ // if channel_update should NOT have caused the failure:
+ // MAY treat the channel_update as invalid.
+ let is_chan_update_invalid = match error_code {
+ c if c == UPDATE|7 => { // temporary_channel_failure
+ false
+ },
+ c if c == UPDATE|11 => { // amount_below_minimum
+ let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
+ onion_failure_log!("amount_below_minimum", UPDATE|11, "htlc_msat", reported_htlc_msat);
+ incoming_htlc_msat > chan_update.contents.htlc_minimum_msat
+ },
+ c if c == UPDATE|12 => { // fee_insufficient
+ let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
+ let new_fee = amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) });
+ onion_failure_log!("fee_insufficient", UPDATE|12, "htlc_msat", reported_htlc_msat);
+ new_fee.is_none() || (incoming_htlc_msat >= new_fee.unwrap() && incoming_htlc_msat >= amt_to_forward + new_fee.unwrap())
+ }
+ c if c == UPDATE|13 => { // incorrect_cltv_expiry
+ let reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
+ onion_failure_log!("incorrect_cltv_expiry", UPDATE|13, "cltv_expiry", reported_cltv_expiry);
+ route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta
+ },
+ c if c == UPDATE|20 => { // channel_disabled
+ let reported_flags = byte_utils::slice_to_be16(&err_packet.failuremsg[2..2+2]);
+ onion_failure_log!("channel_disabled", UPDATE|20, "flags", reported_flags);
+ chan_update.contents.flags & 0x01 == 0x01
+ },
+ c if c == UPDATE|21 => true, // expiry_too_far
+ _ => { unreachable!(); },
+ };
+
+ let msg = if is_chan_update_invalid { None } else {
+ Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
+ msg: chan_update,
+ })
+ };
+ res = Some((msg, true));
+ return;
+ }
+ }
+ }
+ },
+ _c if error_code & BADONION == BADONION => {
+ //TODO
+ },
+ 14 => { // expiry_too_soon
+ res = Some((None, true));
+ return;
+ }
+ _ => {
+ // node sending unknown code
+ res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+ node_id: route_hop.pubkey,
+ is_permanent: true,
+ }), false));
+ return;
+ }
+ }
+ }
+ }
+ }
+ }).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
+ res.unwrap_or((None, true))
+ } else { (None, true) }
+ }
+
+ fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
let mut channel_state = self.channel_state.lock().unwrap();
- let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) {
+ match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
//TODO: here and below MsgHandleErrInternal, #153 case
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}?;
-
- match htlc_source {
- &HTLCSource::OutboundRoute { ref route, ref session_priv, .. } => {
- // Handle packed channel/node updates for passing back for the route handler
- let mut packet_decrypted = msg.reason.data.clone();
- let mut res = None;
- Self::construct_onion_keys_callback(&self.secp_ctx, &route, &session_priv, |shared_secret, _, _, route_hop| {
- if res.is_some() { return; }
-
- let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);
-
- let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
- decryption_tmp.resize(packet_decrypted.len(), 0);
- let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
- chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
- packet_decrypted = decryption_tmp;
-
- if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
- if err_packet.failuremsg.len() >= 2 {
- let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);
-
- let mut hmac = Hmac::new(Sha256::new(), &um);
- hmac.input(&err_packet.encode()[32..]);
- let mut calc_tag = [0u8; 32];
- hmac.raw_result(&mut calc_tag);
- if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) {
- const UNKNOWN_CHAN: u16 = 0x4000|10;
- const TEMP_CHAN_FAILURE: u16 = 0x4000|7;
- match byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]) {
- TEMP_CHAN_FAILURE => {
- if err_packet.failuremsg.len() >= 4 {
- let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[2..4]) as usize;
- if err_packet.failuremsg.len() >= 4 + update_len {
- if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[4..4 + update_len])) {
- res = Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
- msg: chan_update,
- });
- }
- }
- }
- },
- UNKNOWN_CHAN => {
- // No such next-hop. We know this came from the
- // current node as the HMAC validated.
- res = Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
- short_channel_id: route_hop.short_channel_id
- });
- },
- _ => {}, //TODO: Enumerate all of these!
- }
- }
- }
- }
- }).unwrap();
- Ok(res)
- },
- _ => { Ok(None) },
- }
+ Ok(())
}
fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
match channel_state.by_id.get_mut(&channel_id) {
None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
Some(chan) => {
- if !chan.is_usable() {
- return Err(APIError::APIMisuseError{err: "Channel is not in usuable state"});
- }
if !chan.is_outbound() {
return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
}
+ if !chan.is_live() {
+ return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
+ }
if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? {
if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
unimplemented!();
handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
}
- fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, HandleError> {
+ fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
}
use chain::transaction::OutPoint;
use chain::chaininterface::ChainListener;
use ln::channelmanager::{ChannelManager,OnionKeys};
+ use ln::channelmonitor::{CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
use ln::router::{Route, RouteHop, Router};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
use std::default::Default;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
+ use std::sync::atomic::Ordering;
use std::time::Instant;
use std::mem;
}
fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
- node_a.node.create_channel(node_b.node.get_our_node_id(), 100000, 10001, 42).unwrap();
+ create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001)
+ }
+
+ fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+ let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat);
+ let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
+ (announcement, as_update, bs_update, channel_id, tx)
+ }
+
+ fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> Transaction {
+ node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
let events_1 = node_a.node.get_and_clear_pending_events();
assert_eq!(events_1.len(), 1);
assert_eq!(events_2.len(), 1);
match events_2[0] {
Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
- assert_eq!(*channel_value_satoshis, 100000);
+ assert_eq!(*channel_value_satoshis, channel_value);
assert_eq!(user_channel_id, 42);
tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
_ => panic!("Unexpected event"),
};
- confirm_transaction(&node_a.chain_monitor, &tx, chan_id);
- let events_5 = node_a.node.get_and_clear_pending_events();
+ tx
+ }
+
+ fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
+ confirm_transaction(&node_b.chain_monitor, &tx, tx.version);
+ let events_5 = node_b.node.get_and_clear_pending_events();
assert_eq!(events_5.len(), 1);
match events_5[0] {
Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
- assert_eq!(*node_id, node_b.node.get_our_node_id());
+ assert_eq!(*node_id, node_a.node.get_our_node_id());
assert!(announcement_sigs.is_none());
- node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), msg).unwrap()
+ node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap()
},
_ => panic!("Unexpected event"),
};
let channel_id;
- confirm_transaction(&node_b.chain_monitor, &tx, chan_id);
- let events_6 = node_b.node.get_and_clear_pending_events();
+ confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
+ let events_6 = node_a.node.get_and_clear_pending_events();
assert_eq!(events_6.len(), 1);
- let as_announcement_sigs = match events_6[0] {
+ (match events_6[0] {
Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
- assert_eq!(*node_id, node_a.node.get_our_node_id());
channel_id = msg.channel_id.clone();
- let as_announcement_sigs = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap().unwrap();
- node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &(*announcement_sigs).clone().unwrap()).unwrap();
- as_announcement_sigs
+ assert_eq!(*node_id, node_b.node.get_our_node_id());
+ (msg.clone(), announcement_sigs.clone().unwrap())
},
_ => panic!("Unexpected event"),
+ }, channel_id)
+ }
+
+ fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
+ let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat);
+ let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
+ (msgs, chan_id, tx)
+ }
+
+ fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
+ let bs_announcement_sigs = {
+ let bs_announcement_sigs = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap().unwrap();
+ node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap();
+ bs_announcement_sigs
};
- let events_7 = node_a.node.get_and_clear_pending_events();
+ let events_7 = node_b.node.get_and_clear_pending_events();
assert_eq!(events_7.len(), 1);
- let (announcement, as_update) = match events_7[0] {
+ let (announcement, bs_update) = match events_7[0] {
Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
(msg, update_msg)
},
_ => panic!("Unexpected event"),
};
- node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_announcement_sigs).unwrap();
- let events_8 = node_b.node.get_and_clear_pending_events();
+ node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap();
+ let events_8 = node_a.node.get_and_clear_pending_events();
assert_eq!(events_8.len(), 1);
- let bs_update = match events_8[0] {
+ let as_update = match events_8[0] {
Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
assert!(*announcement == *msg);
update_msg
*node_a.network_chan_count.borrow_mut() += 1;
- ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone(), channel_id, tx)
+ ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
}
fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
- let chan_announcement = create_chan_between_nodes(&nodes[a], &nodes[b]);
+ create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001)
+ }
+
+ fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+ let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat);
for node in nodes {
assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
node.router.handle_channel_update(&chan_announcement.1).unwrap();
let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
match err {
- APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
_ => panic!("Unknown error variants"),
};
}
let events = origin_node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::PaymentFailed { payment_hash } => {
+ Event::PaymentFailed { payment_hash, rejected_by_dest } => {
assert_eq!(payment_hash, our_payment_hash);
+ assert!(rejected_by_dest);
},
_ => panic!("Unexpected event"),
}
}
}
+ #[test]
+ fn channel_reserve_test() {
+ use util::rng;
+ use std::sync::atomic::Ordering;
+ use ln::msgs::HandleError;
+
+ macro_rules! get_channel_value_stat {
+ ($node: expr, $channel_id: expr) => {{
+ let chan_lock = $node.node.channel_state.lock().unwrap();
+ let chan = chan_lock.by_id.get(&$channel_id).unwrap();
+ chan.get_value_stat()
+ }}
+ }
+
+ let mut nodes = create_network(3);
+ let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001);
+ let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001);
+
+ let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
+ let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
+
+ let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
+ let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
+
+ macro_rules! get_route_and_payment_hash {
+ ($recv_value: expr) => {{
+ let route = nodes[0].router.get_route(&nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ (route, payment_hash, payment_preimage)
+ }}
+ };
+
+ macro_rules! expect_pending_htlcs_forwardable {
+ ($node: expr) => {{
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
+ $node.node.process_pending_htlc_forwards();
+ }}
+ };
+
+ macro_rules! expect_forward {
+ ($node: expr) => {{
+ let mut events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ check_added_monitors!($node, 1);
+ let payment_event = SendEvent::from_event(events.remove(0));
+ payment_event
+ }}
+ }
+
+ macro_rules! expect_payment_received {
+ ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!($expected_payment_hash, *payment_hash);
+ assert_eq!($expected_recv_value, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ };
+
+ let feemsat = 239; // somehow we know?
+ let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
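+ // one fee per intermediate hop; on this three-node path that's just nodes[1]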
+
+ let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat;
+
+ // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
+ assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
+ let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
+ match err {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ let mut htlc_id = 0;
+ // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
+ // nodes[0]'s wealth
+ loop {
+ let amt_msat = recv_value_0 + total_fee_msat;
+ if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat {
+ break;
+ }
+ send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
+ htlc_id += 1;
+
+ let (stat01_, stat11_, stat12_, stat22_) = (
+ get_channel_value_stat!(nodes[0], chan_1.2),
+ get_channel_value_stat!(nodes[1], chan_1.2),
+ get_channel_value_stat!(nodes[1], chan_2.2),
+ get_channel_value_stat!(nodes[2], chan_2.2),
+ );
+
+ assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
+ assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
+ assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
+ assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
+ stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
+ }
+
+ {
+ let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat;
+ // attempt to get channel_reserve violation
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
+ let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
+ match err {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ // adding pending output
+ let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2;
+ let amt_msat_1 = recv_value_1 + total_fee_msat;
+
+ let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1);
+ let payment_event_1 = {
+ nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap();
+
+ // channel reserve test with htlc pending output > 0
+ let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat;
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+ match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ {
+ // test the channel_reserve check on nodes[1]'s side
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+
+ // Need to manually create update_add_htlc message to go around the channel reserve check in send_htlc()
+ let secp_ctx = Secp256k1::new();
+ let session_priv = SecretKey::from_slice(&secp_ctx, &{
+ let mut session_key = [0; 32];
+ rng::fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+ let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+ let msg = msgs::UpdateAddHTLC {
+ channel_id: chan_1.2,
+ htlc_id,
+ amount_msat: htlc_msat,
+ payment_hash: our_payment_hash,
+ cltv_expiry: htlc_cltv,
+ onion_routing_packet: onion_packet,
+ };
+
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
+ match err {
+ HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
+ }
+ }
+
+ // split the rest to test holding cell
+ let recv_value_21 = recv_value_2/2;
+ let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
+ {
+ let stat = get_channel_value_stat!(nodes[0], chan_1.2);
+ assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
+ }
+
+ // now see if they go through on both sides
+ let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
+ // but this one will get stuck in the holding cell
+ nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 0);
+
+ // test with outbound holding cell amount > 0
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
+ match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
+ // this one will also get stuck in the holding cell
+ nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 0);
+
+ // flush the pending htlc
+ let (as_revoke_and_ack, as_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ let commitment_update_2 = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap().unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let (bs_revoke_and_ack, bs_none) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
+ assert!(bs_none.is_none());
+ check_added_monitors!(nodes[0], 1);
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let ref payment_event_11 = expect_forward!(nodes[1]);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1);
+
+ // flush the htlcs in the holding cell
+ assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap();
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let ref payment_event_3 = expect_forward!(nodes[1]);
+ assert_eq!(payment_event_3.msgs.len(), 2);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();
+
+ commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+
+ let events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(our_payment_hash_21, *payment_hash);
+ assert_eq!(recv_value_21, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(our_payment_hash_22, *payment_hash);
+ assert_eq!(recv_value_22, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
+
+ let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat);
+ let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
+ assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
+ assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat);
+
+ let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
+ assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
+ }
+
#[test]
fn channel_monitor_network_test() {
// Simple test which builds a network of ChannelManagers, connects them to each other, and
assert_eq!(nodes[2].node.list_channels().len(), 0);
assert_eq!(nodes[3].node.list_channels().len(), 1);
+ { // Cheat and reset nodes[4]'s height to 1
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![] }, 1);
+ }
+
+ assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1);
+ assert_eq!(nodes[4].node.latest_block_height.load(Ordering::Acquire), 1);
// One pending HTLC to time out:
let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
+ // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
+ // buffer space).
{
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
- nodes[3].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]);
- for i in 2..TEST_FINAL_CLTV - 3 {
+ nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
+ for i in 3..TEST_FINAL_CLTV + 2 + HTLC_FAIL_TIMEOUT_BLOCKS + 1 {
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
}
claim_funds!(nodes[4], nodes[3], payment_preimage_2);
header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
- nodes[4].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]);
- for i in 2..TEST_FINAL_CLTV - 3 {
+ nodes[4].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
+ for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 {
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
}
assert_eq!(channel_state.short_to_id.len(), 0);
}
- fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize)) {
+ /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
+ /// for claims/fails they are separated out.
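+ /// A pending_htlc_adds entry of -1 denotes that a response commitment_signed (rather than
+ /// update_add_htlcs) is expected; see the inline comments below.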
+ fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
let reestablish_1 = node_a.node.peer_connected(&node_b.node.get_our_node_id());
let reestablish_2 = node_b.node.peer_connected(&node_a.node.get_our_node_id());
for msg in reestablish_1 {
resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap());
}
- if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 {
+ if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
check_added_monitors!(node_b, 1);
} else {
check_added_monitors!(node_b, 0);
for msg in reestablish_2 {
resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap());
}
- if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 {
+ if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
check_added_monitors!(node_a, 1);
} else {
check_added_monitors!(node_a, 0);
}
// We don't yet support both needing updates, as that would require a different commitment dance:
- assert!((pending_htlc_claims.0 == 0 && pending_htlc_fails.0 == 0) || (pending_htlc_claims.1 == 0 && pending_htlc_fails.1 == 0));
+ assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
+ (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
for chan_msgs in resp_1.drain(..) {
if pre_all_htlcs {
- let _announcement_sigs_opt = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
+ let _announcement_sigs_opt = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
//TODO: Test announcement_sigs re-sending when we've implemented it
} else {
assert!(chan_msgs.0.is_none());
}
- assert!(chan_msgs.1.is_none());
- if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 {
+ if pending_raa.0 {
+ assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
+ check_added_monitors!(node_a, 1);
+ } else {
+ assert!(chan_msgs.1.is_none());
+ }
+ if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
let commitment_update = chan_msgs.2.unwrap();
- assert!(commitment_update.update_add_htlcs.is_empty()); // We can't relay while disconnected
- assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0);
- assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0);
+ if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+ assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
+ } else {
+ assert!(commitment_update.update_add_htlcs.is_empty());
+ }
+ assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
+ assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+ for update_add in commitment_update.update_add_htlcs {
+ node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap();
+ }
for update_fulfill in commitment_update.update_fulfill_htlcs {
node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
}
node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
}
- commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
+ if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+ commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
+ } else {
+ let (as_revoke_and_ack, as_commitment_signed) = node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+ check_added_monitors!(node_a, 1);
+ assert!(as_commitment_signed.is_none());
+ assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(node_b, 1);
+ }
} else {
assert!(chan_msgs.2.is_none());
}
} else {
assert!(chan_msgs.0.is_none());
}
- assert!(chan_msgs.1.is_none());
- if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 {
+ if pending_raa.1 {
+ assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
+ check_added_monitors!(node_b, 1);
+ } else {
+ assert!(chan_msgs.1.is_none());
+ }
+ if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
let commitment_update = chan_msgs.2.unwrap();
- assert!(commitment_update.update_add_htlcs.is_empty()); // We can't relay while disconnected
- assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0);
- assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0);
+ if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+ assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
+ } else {
+ assert!(commitment_update.update_add_htlcs.is_empty());
+ }
+ assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
+ assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+ for update_add in commitment_update.update_add_htlcs {
+ node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap();
+ }
for update_fulfill in commitment_update.update_fulfill_htlcs {
node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
}
node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
}
- commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
+ if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+ commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
+ } else {
+ let (bs_revoke_and_ack, bs_commitment_signed) = node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+ check_added_monitors!(node_b, 1);
+ assert!(bs_commitment_signed.is_none());
+ assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(node_a, 1);
+ }
} else {
assert!(chan_msgs.2.is_none());
}
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0));
+ reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0));
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
- reconnect_nodes(&nodes[0], &nodes[1], false, (1, 0), (1, 0));
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
{
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
_ => panic!("Unexpected event"),
}
match events[1] {
- Event::PaymentFailed { payment_hash } => {
+ Event::PaymentFailed { payment_hash, rejected_by_dest } => {
assert_eq!(payment_hash, payment_hash_5);
+ assert!(rejected_by_dest);
},
_ => panic!("Unexpected event"),
}
fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
}
+ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
+ // Test that we can reconnect when in-flight HTLC updates get dropped
+ let mut nodes = create_network(2);
+ if messages_delivered == 0 {
+ create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
+ // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
+ } else {
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ }
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+
+ let payment_event = {
+ nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
+
+ if messages_delivered < 2 {
+ // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
+ } else {
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ if messages_delivered >= 3 {
+ assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ if messages_delivered >= 4 {
+ let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed.unwrap()).unwrap();
+ assert!(as_commitment_signed.is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ if messages_delivered >= 5 {
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+ }
+ }
+ }
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ if messages_delivered < 2 {
+ // Even if the funding_locked messages get exchanged, as long as nothing further was
+ // received on either side, both sides will need to resend them.
+ reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
+ } else if messages_delivered == 2 {
+ // nodes[0] still wants its RAA + commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
+ } else if messages_delivered == 3 {
+ // nodes[0] still wants its commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ } else if messages_delivered == 4 {
+ // nodes[1] still wants its final RAA
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+ } else if messages_delivered == 5 {
+ // Everything was delivered...
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
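+
+ // However the add/commitment_signed/RAA exchange played out above, both sides are now
+ // back in sync, so nodes[1] can surface the pending HTLC forward event.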
+
+ let events_1 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
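+ // Pull the forwarding timer back to now so process_pending_htlc_forwards acts on the
+ // pending HTLC immediately instead of waiting out the batching delay.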
+ nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+ nodes[1].node.process_pending_htlc_forwards();
+
+ let events_2 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(payment_hash_1, *payment_hash);
+ assert_eq!(amt, 1000000);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[1].node.claim_funds(payment_preimage_1);
+ check_added_monitors!(nodes[1], 1);
+
+ let events_3 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
+ Event::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
+ },
+ _ => panic!("Unexpected event"),
+ };
+
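+ // Reuse messages_delivered to gate how much of the fulfill-side dance (the
+ // update_fulfill_htlc and the commitment dance that follows it) gets through before
+ // the next disconnect.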
+ if messages_delivered >= 1 {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap();
+
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 1);
+ match events_4[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(payment_preimage_1, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ if messages_delivered >= 2 {
+ let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ if messages_delivered >= 3 {
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ if messages_delivered >= 4 {
+ let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
+ assert!(bs_commitment_signed.is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ if messages_delivered >= 5 {
+ assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[0], 1);
+ }
+ }
+ }
+ }
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ if messages_delivered < 2 {
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+ //TODO: Deduplicate PaymentSent events, then enable this if:
+ //if messages_delivered < 1 {
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 1);
+ match events_4[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(payment_preimage_1, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ //}
+ } else if messages_delivered == 2 {
+ // nodes[0] still wants its RAA + commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
+ } else if messages_delivered == 3 {
+ // nodes[0] still wants its commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
+ } else if messages_delivered == 4 {
+ // nodes[1] still wants its final RAA
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+ } else if messages_delivered == 5 {
+ // Everything was delivered...
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ // Channel should still work fine...
+ let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+ }
+
+ #[test]
+ fn test_drop_messages_peer_disconnect_a() {
+ do_test_drop_messages_peer_disconnect(0);
+ do_test_drop_messages_peer_disconnect(1);
+ do_test_drop_messages_peer_disconnect(2);
+ }
+
+ #[test]
+ fn test_drop_messages_peer_disconnect_b() {
+ do_test_drop_messages_peer_disconnect(3);
+ do_test_drop_messages_peer_disconnect(4);
+ do_test_drop_messages_peer_disconnect(5);
+ }
+
+ #[test]
+ fn test_funding_peer_disconnect() {
+ // Test that we can lock in our funding tx while disconnected
+ let nodes = create_network(2);
+ let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
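+ // Confirm the funding transaction on each side while the peers are disconnected; each
+ // node should queue up a funding_locked to deliver once the connection comes back.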
+ confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version);
+ let events_1 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ assert!(announcement_sigs.is_none());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
+ let events_2 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2[0] {
+ Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(announcement_sigs.is_none());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
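+ // funding_locked should be re-sent on each reconnect (hence the `true` flag): since
+ // nothing further has been received on either side, both sides resend it every time.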
+ reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ // TODO: We shouldn't need to manually pass list_usable_channels here once we support
+ // rebroadcasting announcement_signatures upon reconnect.
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+ }
+
#[test]
fn test_invalid_channel_announcement() {
//Test BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs
let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
- let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
let unsigned_msg = dummy_unsigned_msg!();
sign_msg!(unsigned_msg);
assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
- let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
// Configured with Network::Testnet
let mut unsigned_msg = dummy_unsigned_msg!();