X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=d6ba67c2b36dcf24d23e3c880ccf64ddcb5a7b19;hb=3bcd911fcb112551525521dc3d16b88e07314318;hp=5e5d531fa4b1eaa578469e4dad611107af33d687;hpb=ba30061c87fca9d808e9e13569b8ea914503fae6;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 5e5d531f..d6ba67c2 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -23,10 +23,10 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError, ChannelKeys};
-use ln::channelmonitor::{ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
+use ln::channelmonitor::{ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
 use ln::router::{Route,RouteHop};
 use ln::msgs;
-use ln::msgs::{HandleError,ChannelMessageHandler};
+use ln::msgs::{ChannelMessageHandler, HandleError, RAACommitmentOrder};
 use util::{byte_utils, events, internal_traits, rng};
 use util::sha2::Sha256;
 use util::ser::{Readable, Writeable};
@@ -105,6 +105,9 @@ mod channel_held_info {
 	OutboundRoute {
 		route: Route,
 		session_priv: SecretKey,
+		/// Technically we can recalculate this from the route, but we cache it here to avoid
+		/// doing a double-pass on route when we get a failure back
+		first_hop_htlc_msat: u64,
 	},
 }
 #[cfg(test)]
@@ -113,6 +116,7 @@ mod channel_held_info {
 		HTLCSource::OutboundRoute {
 			route: Route { hops: Vec::new() },
 			session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
+			first_hop_htlc_msat: 0,
 		}
 	}
 }
@@ -296,6 +300,7 @@ pub struct ChannelManager {
 /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
 /// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more).
 const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
+const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that // if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have @@ -581,6 +586,33 @@ impl ChannelManager { } } + fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) { + match err { + ChannelMonitorUpdateErr::PermanentFailure => { + let mut chan = { + let channel_state = channel_state_lock.borrow_parts(); + let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!"); + if let Some(short_id) = chan.get_short_channel_id() { + channel_state.short_to_id.remove(&short_id); + } + chan + }; + mem::drop(channel_state_lock); + self.finish_force_close_channel(chan.force_shutdown()); + let mut events = self.pending_events.lock().unwrap(); + if let Ok(update) = self.get_channel_update(&chan) { + events.push(events::Event::BroadcastChannelUpdate { + msg: update + }); + } + }, + ChannelMonitorUpdateErr::TemporaryFailure => { + let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!"); + channel.monitor_update_failed(reason); + }, + } + } + #[inline] fn gen_rho_mu_from_shared_secret(shared_secret: &SharedSecret) -> ([u8; 32], [u8; 32]) { ({ @@ -896,13 +928,17 @@ impl ChannelManager { } }; - //TODO: Check that msg.cltv_expiry is within acceptable bounds! - let pending_forward_info = if next_hop_data.hmac == [0; 32] { // OUR PAYMENT! - if next_hop_data.data.amt_to_forward != msg.amount_msat { + // final_expiry_too_soon + if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 { + return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); + } + // final_incorrect_htlc_amount + if next_hop_data.data.amt_to_forward > msg.amount_msat { return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); } + // final_incorrect_cltv_expiry if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry { return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); } @@ -967,29 +1003,54 @@ impl ChannelManager { if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned(); let forwarding_id = match id_option { - None => { + None => { // unknown_next_peer return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]); }, Some(id) => id.clone(), }; - if let Some((err, code, chan_update)) = { + if let Some((err, code, chan_update)) = loop { let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); - if !chan.is_live() { - Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, self.get_channel_update(chan).unwrap())) - } else { - let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) }); - if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { - Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, 
self.get_channel_update(chan).unwrap())) - } else { - if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { - Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, self.get_channel_update(chan).unwrap())) - } else { - None - } - } + + // Note that we could technically not return an error yet here and just hope + // that the connection is reestablished or monitor updated by the time we get + // around to doing the actual forward, but better to fail early if we can and + // hopefully an attacker trying to path-trace payments cannot make this occur + // on a small/per-node/per-channel scale. + if !chan.is_live() { // channel_disabled + break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap()))); + } + if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum + break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap()))); + } + let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) }); + if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient + break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap()))); + } + if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry + break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap()))); + } + let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1; + // We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration + if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon + break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap()))); } - } { - return_err!(err, code, &chan_update.encode_with_len()[..]); + if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far + break Some(("CLTV expiry is too far in the future", 21, None)); + } + break None; + } + { + let mut res = Vec::with_capacity(8 + 128); + if code == 0x1000 | 11 || code == 0x1000 | 12 { + res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat)); + } + else if code == 0x1000 | 13 { + res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry)); + } + if let Some(chan_update) = chan_update { + res.extend_from_slice(&chan_update.encode_with_len()[..]); + } + return_err!(err, code, &res[..]); } } } @@ -998,6 +1059,7 @@ impl ChannelManager { } /// only fails if the channel does not yet have an assigned short_id + /// May be called with channel_state already locked! 
fn get_channel_update(&self, chan: &Channel) -> Result { let short_channel_id = match chan.get_short_channel_id() { None => return Err(HandleError{err: "Channel not yet established", action: None}), @@ -1067,9 +1129,8 @@ impl ChannelManager { let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?; let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash); - let (first_hop_node_id, (update_add, commitment_signed, chan_monitor)) = { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = channel_state_lock.borrow_parts(); + let (first_hop_node_id, update_add, commitment_signed) = { + let mut channel_state = self.channel_state.lock().unwrap(); let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}), @@ -1077,31 +1138,45 @@ impl ChannelManager { }; let res = { - let chan = channel_state.by_id.get_mut(&id).unwrap(); - if chan.get_their_node_id() != route.hops.first().unwrap().pubkey { - return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); - } - if !chan.is_live() { - return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"}); + let res = { + let chan = channel_state.by_id.get_mut(&id).unwrap(); + if chan.get_their_node_id() != route.hops.first().unwrap().pubkey { + return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); + } + if chan.is_awaiting_monitor_update() { + return Err(APIError::MonitorUpdateFailed); + } + if !chan.is_live() { + return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"}); + } + chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + route: route.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})? + }; + match res { + Some((update_add, commitment_signed, chan_monitor)) => { + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst); + return Err(APIError::MonitorUpdateFailed); + } + Some((update_add, commitment_signed)) + }, + None => None, } - chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { - route: route.clone(), - session_priv: session_priv.clone(), - }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})? 
}; let first_hop_node_id = route.hops.first().unwrap().pubkey; match res { - Some(msgs) => (first_hop_node_id, msgs), + Some((update_add, commitment_signed)) => { + (first_hop_node_id, update_add, commitment_signed) + }, None => return Ok(()), } }; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); - } - let mut events = self.pending_events.lock().unwrap(); events.push(events::Event::UpdateHTLCs { node_id: first_hop_node_id, @@ -1154,7 +1229,9 @@ impl ChannelManager { }, None => return } - }; // Release channel lock for install_watch_outpoint call, + }; + // Because we have exclusive ownership of the channel here we can release the channel_state + // lock before add_update_monitor if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { unimplemented!(); } @@ -1269,7 +1346,10 @@ impl ChannelManager { continue; }, }; - new_events.push((Some(monitor), events::Event::UpdateHTLCs { + if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + unimplemented!();// but def dont push the event... + } + new_events.push(events::Event::UpdateHTLCs { node_id: forward_chan.get_their_node_id(), updates: msgs::CommitmentUpdate { update_add_htlcs: add_htlc_msgs, @@ -1279,7 +1359,7 @@ impl ChannelManager { update_fee: None, commitment_signed: commitment_msg, }, - })); + }); } } else { for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) { @@ -1292,10 +1372,10 @@ impl ChannelManager { hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data), hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); }, }; - new_events.push((None, events::Event::PaymentReceived { + new_events.push(events::Event::PaymentReceived { payment_hash: forward_info.payment_hash, amt: forward_info.amt_to_forward, - })); + }); } } } @@ -1309,25 +1389,14 @@ impl ChannelManager { } if new_events.is_empty() { return } - - new_events.retain(|event| { - if let &Some(ref monitor) = &event.0 { - if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor.clone()) { - unimplemented!();// but def dont push the event... - } - } - true - }); - let mut events = self.pending_events.lock().unwrap(); - events.reserve(new_events.len()); - for event in new_events.drain(..) { - events.push(event.1); - } + events.append(&mut new_events); } /// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event. pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> bool { + // TODO: Add ability to return 0x4000|16 (incorrect_payment_amount) if the amount we + // received is < expected or > 2*expected let mut channel_state = Some(self.channel_state.lock().unwrap()); let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash); if let Some(mut sources) = removed_source { @@ -1349,11 +1418,21 @@ impl ChannelManager { match source { HTLCSource::OutboundRoute { .. 
} => { mem::drop(channel_state); - - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::PaymentFailed { - payment_hash: payment_hash.clone() - }); + if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error { + let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone()); + let mut pending_events = self.pending_events.lock().unwrap(); + if let Some(channel_update) = channel_update { + pending_events.push(events::Event::PaymentFailureNetworkUpdate { + update: channel_update, + }); + } + pending_events.push(events::Event::PaymentFailed { + payment_hash: payment_hash.clone(), + rejected_by_dest: !payment_retryable, + }); + } else { + panic!("should have onion error packet here"); + } }, HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => { let err_packet = match onion_error { @@ -1374,7 +1453,13 @@ impl ChannelManager { let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) { - Ok(msg) => (chan.get_their_node_id(), msg), + Ok(Some((msg, commitment_msg, chan_monitor))) => { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + (chan.get_their_node_id(), Some((msg, commitment_msg))) + }, + Ok(None) => (chan.get_their_node_id(), None), Err(_e) => { //TODO: Do something with e? return; @@ -1383,13 +1468,9 @@ impl ChannelManager { }; match fail_msgs { - Some((msg, commitment_msg, chan_monitor)) => { + Some((msg, commitment_msg)) => { mem::drop(channel_state); - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!();// but def dont push the event... - } - let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(events::Event::UpdateHTLCs { node_id, @@ -1454,7 +1535,13 @@ impl ChannelManager { let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) { - Ok(msg) => (chan.get_their_node_id(), msg), + Ok((msgs, Some(chan_monitor))) => { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!();// but def dont push the event... + } + (chan.get_their_node_id(), msgs) + }, + Ok((msgs, None)) => (chan.get_their_node_id(), msgs), Err(_e) => { // TODO: There is probably a channel manager somewhere that needs to // learn the preimage as the channel may be about to hit the chain. @@ -1465,13 +1552,7 @@ impl ChannelManager { }; mem::drop(channel_state); - if let Some(chan_monitor) = fulfill_msgs.1 { - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!();// but def dont push the event... - } - } - - if let Some((msg, commitment_msg)) = fulfill_msgs.0 { + if let Some((msg, commitment_msg)) = fulfill_msgs { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(events::Event::UpdateHTLCs { node_id: node_id, @@ -1498,7 +1579,83 @@ impl ChannelManager { /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update /// operation. 
pub fn test_restore_channel_monitor(&self) { - unimplemented!(); + let mut new_events = Vec::new(); + let mut close_results = Vec::new(); + let mut htlc_forwards = Vec::new(); + let mut htlc_failures = Vec::new(); + + { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + let short_to_id = channel_state.short_to_id; + channel_state.by_id.retain(|_, channel| { + if channel.is_awaiting_monitor_update() { + let chan_monitor = channel.channel_monitor(); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + match e { + ChannelMonitorUpdateErr::PermanentFailure => { + if let Some(short_id) = channel.get_short_channel_id() { + short_to_id.remove(&short_id); + } + close_results.push(channel.force_shutdown()); + if let Ok(update) = self.get_channel_update(&channel) { + new_events.push(events::Event::BroadcastChannelUpdate { + msg: update + }); + } + false + }, + ChannelMonitorUpdateErr::TemporaryFailure => true, + } + } else { + let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored(); + if !pending_forwards.is_empty() { + htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards)); + } + htlc_failures.append(&mut pending_failures); + + macro_rules! handle_cs { () => { + if let Some(update) = commitment_update { + new_events.push(events::Event::UpdateHTLCs { + node_id: channel.get_their_node_id(), + updates: update, + }); + } + } } + macro_rules! handle_raa { () => { + if let Some(revoke_and_ack) = raa { + new_events.push(events::Event::SendRevokeAndACK { + node_id: channel.get_their_node_id(), + msg: revoke_and_ack, + }); + } + } } + match order { + RAACommitmentOrder::CommitmentFirst => { + handle_cs!(); + handle_raa!(); + }, + RAACommitmentOrder::RevokeAndACKFirst => { + handle_raa!(); + handle_cs!(); + }, + } + true + } + } else { true } + }); + } + + for failure in htlc_failures.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); + } + self.forward_htlcs(&mut htlc_forwards[..]); + + for res in close_results.drain(..) { + self.finish_force_close_channel(res); + } + + self.pending_events.lock().unwrap().append(&mut new_events); } fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result { @@ -1584,10 +1741,9 @@ impl ChannelManager { }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) } - }; // Release channel lock for install_watch_outpoint call, - // note that this means if the remote end is misbehaving and sends a message for the same - // channel back-to-back with funding_created, we'll end up thinking they sent a message - // for a bogus channel. 
+ }; + // Because we have exclusive ownership of the channel here we can release the channel_state + // lock before add_update_monitor if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) { unimplemented!(); } @@ -1604,7 +1760,7 @@ impl ChannelManager { } fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { - let (funding_txo, user_id, monitor) = { + let (funding_txo, user_id) = { let mut channel_state = self.channel_state.lock().unwrap(); match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { @@ -1613,14 +1769,14 @@ impl ChannelManager { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; - (chan.get_funding_txo().unwrap(), chan.get_user_id(), chan_monitor) + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + (chan.get_funding_txo().unwrap(), chan.get_user_id()) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; - if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!(); - } let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(events::Event::FundingBroadcastSafe { funding_txo: funding_txo, @@ -1767,9 +1923,206 @@ impl ChannelManager { Ok(()) } - fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result, MsgHandleErrInternal> { + // Process failure we got back from upstream on a payment we sent. Returns update and a boolean + // indicating that the payment itself failed + fn process_onion_failure(&self, htlc_source: &HTLCSource, mut packet_decrypted: Vec) -> (Option, bool) { + if let &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } = htlc_source { + macro_rules! 
onion_failure_log { + ( $error_code_textual: expr, $error_code: expr, $reported_name: expr, $reported_value: expr ) => { + log_trace!(self, "{}({:#x}) {}({})", $error_code_textual, $error_code, $reported_name, $reported_value); + }; + ( $error_code_textual: expr, $error_code: expr ) => { + log_trace!(self, "{}({})", $error_code_textual, $error_code); + }; + } + + const BADONION: u16 = 0x8000; + const PERM: u16 = 0x4000; + const UPDATE: u16 = 0x1000; + + let mut res = None; + let mut htlc_msat = *first_hop_htlc_msat; + + // Handle packed channel/node updates for passing back for the route handler + Self::construct_onion_keys_callback(&self.secp_ctx, route, session_priv, |shared_secret, _, _, route_hop| { + if res.is_some() { return; } + + let incoming_htlc_msat = htlc_msat; + let amt_to_forward = htlc_msat - route_hop.fee_msat; + htlc_msat = amt_to_forward; + + let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret); + + let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len()); + decryption_tmp.resize(packet_decrypted.len(), 0); + let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]); + chacha.process(&packet_decrypted, &mut decryption_tmp[..]); + packet_decrypted = decryption_tmp; + + let is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey; + + if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) { + let um = ChannelManager::gen_um_from_shared_secret(&shared_secret); + let mut hmac = Hmac::new(Sha256::new(), &um); + hmac.input(&err_packet.encode()[32..]); + let mut calc_tag = [0u8; 32]; + hmac.raw_result(&mut calc_tag); + + if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) { + if err_packet.failuremsg.len() < 2 { + // Useless packet that we can't use but it passed HMAC, so it + // definitely came from the peer in question + res = Some((None, !is_from_final_node)); + } else { + let error_code = byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]); + + match error_code & 0xff { + 1|2|3 => { + // either from an intermediate or final node + // invalid_realm(PERM|1), + // temporary_node_failure(NODE|2) + // permanent_node_failure(PERM|NODE|2) + // required_node_feature_mssing(PERM|NODE|3) + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: error_code & PERM == PERM, + }), !(error_code & PERM == PERM && is_from_final_node))); + // node returning invalid_realm is removed from network_map, + // although NODE flag is not set, TODO: or remove channel only? + // retry payment when removed node is not a final node + return; + }, + _ => {} + } + + if is_from_final_node { + let payment_retryable = match error_code { + c if c == PERM|15 => false, // unknown_payment_hash + c if c == PERM|16 => false, // incorrect_payment_amount + 17 => true, // final_expiry_too_soon + 18 if err_packet.failuremsg.len() == 6 => { // final_incorrect_cltv_expiry + let _reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]); + true + }, + 19 if err_packet.failuremsg.len() == 10 => { // final_incorrect_htlc_amount + let _reported_incoming_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]); + true + }, + _ => { + // A final node has sent us either an invalid code or an error_code that + // MUST be sent from the processing node, or the formmat of failuremsg + // does not coform to the spec. 
+ // Remove it from the network map and don't may retry payment + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: true, + }), false)); + return; + } + }; + res = Some((None, payment_retryable)); + return; + } + + // now, error_code should be only from the intermediate nodes + match error_code { + _c if error_code & PERM == PERM => { + res = Some((Some(msgs::HTLCFailChannelUpdate::ChannelClosed { + short_channel_id: route_hop.short_channel_id, + is_permanent: true, + }), false)); + }, + _c if error_code & UPDATE == UPDATE => { + let offset = match error_code { + c if c == UPDATE|7 => 0, // temporary_channel_failure + c if c == UPDATE|11 => 8, // amount_below_minimum + c if c == UPDATE|12 => 8, // fee_insufficient + c if c == UPDATE|13 => 4, // incorrect_cltv_expiry + c if c == UPDATE|14 => 0, // expiry_too_soon + c if c == UPDATE|20 => 2, // channel_disabled + _ => { + // node sending unknown code + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: true, + }), false)); + return; + } + }; + + if err_packet.failuremsg.len() >= offset + 2 { + let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[offset+2..offset+4]) as usize; + if err_packet.failuremsg.len() >= offset + 4 + update_len { + if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[offset + 4..offset + 4 + update_len])) { + // if channel_update should NOT have caused the failure: + // MAY treat the channel_update as invalid. + let is_chan_update_invalid = match error_code { + c if c == UPDATE|7 => { // temporary_channel_failure + false + }, + c if c == UPDATE|11 => { // amount_below_minimum + let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]); + onion_failure_log!("amount_below_minimum", UPDATE|11, "htlc_msat", reported_htlc_msat); + incoming_htlc_msat > chan_update.contents.htlc_minimum_msat + }, + c if c == UPDATE|12 => { // fee_insufficient + let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]); + let new_fee = amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) }); + onion_failure_log!("fee_insufficient", UPDATE|12, "htlc_msat", reported_htlc_msat); + new_fee.is_none() || incoming_htlc_msat >= new_fee.unwrap() && incoming_htlc_msat >= amt_to_forward + new_fee.unwrap() + } + c if c == UPDATE|13 => { // incorrect_cltv_expiry + let reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]); + onion_failure_log!("incorrect_cltv_expiry", UPDATE|13, "cltv_expiry", reported_cltv_expiry); + route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta + }, + c if c == UPDATE|20 => { // channel_disabled + let reported_flags = byte_utils::slice_to_be16(&err_packet.failuremsg[2..2+2]); + onion_failure_log!("channel_disabled", UPDATE|20, "flags", reported_flags); + chan_update.contents.flags & 0x01 == 0x01 + }, + c if c == UPDATE|21 => true, // expiry_too_far + _ => { unreachable!(); }, + }; + + let msg = if is_chan_update_invalid { None } else { + Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { + msg: chan_update, + }) + }; + res = Some((msg, true)); + return; + } + } + } + }, + _c if error_code & BADONION == BADONION => { + //TODO + }, + 14 => { // expiry_too_soon + res = Some((None, true)); + return; + } + _ => { + // node sending unknown code + 
res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: true, + }), false)); + return; + } + } + } + } + } + }).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?"); + res.unwrap_or((None, true)) + } else { ((None, true)) } + } + + fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_state = self.channel_state.lock().unwrap(); - let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) { + match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { if chan.get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case @@ -1780,65 +2133,7 @@ impl ChannelManager { }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) }?; - - match htlc_source { - &HTLCSource::OutboundRoute { ref route, ref session_priv, .. } => { - // Handle packed channel/node updates for passing back for the route handler - let mut packet_decrypted = msg.reason.data.clone(); - let mut res = None; - Self::construct_onion_keys_callback(&self.secp_ctx, &route, &session_priv, |shared_secret, _, _, route_hop| { - if res.is_some() { return; } - - let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret); - - let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len()); - decryption_tmp.resize(packet_decrypted.len(), 0); - let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]); - chacha.process(&packet_decrypted, &mut decryption_tmp[..]); - packet_decrypted = decryption_tmp; - - if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) { - if err_packet.failuremsg.len() >= 2 { - let um = ChannelManager::gen_um_from_shared_secret(&shared_secret); - - let mut hmac = Hmac::new(Sha256::new(), &um); - hmac.input(&err_packet.encode()[32..]); - let mut calc_tag = [0u8; 32]; - hmac.raw_result(&mut calc_tag); - if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) { - const UNKNOWN_CHAN: u16 = 0x4000|10; - const TEMP_CHAN_FAILURE: u16 = 0x4000|7; - match byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]) { - TEMP_CHAN_FAILURE => { - if err_packet.failuremsg.len() >= 4 { - let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[2..4]) as usize; - if err_packet.failuremsg.len() >= 4 + update_len { - if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[4..4 + update_len])) { - res = Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { - msg: chan_update, - }); - } - } - } - }, - UNKNOWN_CHAN => { - // No such next-hop. We know this came from the - // current node as the HMAC validated. - res = Some(msgs::HTLCFailChannelUpdate::ChannelClosed { - short_channel_id: route_hop.short_channel_id, - is_permanent: true, - }); - }, - _ => {}, //TODO: Enumerate all of these! 
- } - } - } - } - }).unwrap(); - Ok(res) - }, - _ => { Ok(None) }, - } + Ok(()) } fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { @@ -1861,7 +2156,7 @@ impl ChannelManager { } fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option), MsgHandleErrInternal> { - let (revoke_and_ack, commitment_signed, chan_monitor) = { + let (revoke_and_ack, commitment_signed) = { let mut channel_state = self.channel_state.lock().unwrap(); match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { @@ -1869,20 +2164,53 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))? + let (revoke_and_ack, commitment_signed, chan_monitor) = chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + (revoke_and_ack, commitment_signed) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); - } - Ok((revoke_and_ack, commitment_signed)) } + #[inline] + fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) { + for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards { + let mut forward_event = None; + if !pending_forwards.is_empty() { + let mut channel_state = self.channel_state.lock().unwrap(); + if channel_state.forward_htlcs.is_empty() { + forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64)); + channel_state.next_forward = forward_event.unwrap(); + } + for (forward_info, prev_htlc_id) in pending_forwards.drain(..) 
{ + match channel_state.forward_htlcs.entry(forward_info.short_channel_id) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info }); + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info })); + } + } + } + } + match forward_event { + Some(time) => { + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::PendingHTLCsForwardable { + time_forwardable: time + }); + } + None => {}, + } + } + } + fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result, MsgHandleErrInternal> { - let ((res, mut pending_forwards, mut pending_failures, chan_monitor), short_channel_id) = { + let ((res, pending_forwards, mut pending_failures), short_channel_id) = { let mut channel_state = self.channel_state.lock().unwrap(); match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { @@ -1890,45 +2218,19 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - (chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel")) + let (res, pending_forwards, pending_failures, chan_monitor) = chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + ((res, pending_forwards, pending_failures), chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel")) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); - } for failure in pending_failures.drain(..) { self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); } - - let mut forward_event = None; - if !pending_forwards.is_empty() { - let mut channel_state = self.channel_state.lock().unwrap(); - if channel_state.forward_htlcs.is_empty() { - forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64)); - channel_state.next_forward = forward_event.unwrap(); - } - for (forward_info, prev_htlc_id) in pending_forwards.drain(..) 
{ - match channel_state.forward_htlcs.entry(forward_info.short_channel_id) { - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info }); - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info })); - } - } - } - } - match forward_event { - Some(time) => { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::PendingHTLCsForwardable { - time_forwardable: time - }); - } - None => {}, - } + self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]); Ok(res) } @@ -1987,26 +2289,27 @@ impl ChannelManager { Ok(()) } - fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option, Option, Option), MsgHandleErrInternal> { - let (res, chan_monitor) = { + fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option, Option, Option, RAACommitmentOrder), MsgHandleErrInternal> { + let res = { let mut channel_state = self.channel_state.lock().unwrap(); match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { if chan.get_their_node_id() != *their_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg) + let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order) = chan.channel_reestablish(msg) .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; - (Ok((funding_locked, revoke_and_ack, commitment_update)), channel_monitor) + if let Some(monitor) = channel_monitor { + if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + unimplemented!(); + } + } + Ok((funding_locked, revoke_and_ack, commitment_update, order)) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; - if let Some(monitor) = chan_monitor { - if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!(); - } - } + res } @@ -2023,6 +2326,9 @@ impl ChannelManager { if !chan.is_outbound() { return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); } + if chan.is_awaiting_monitor_update() { + return Err(APIError::MonitorUpdateFailed); + } if !chan.is_live() { return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"}); } @@ -2243,7 +2549,7 @@ impl ChannelMessageHandler for ChannelManager { handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id) } - fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result, HandleError> { + fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> { handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id) } @@ -2267,7 +2573,7 @@ impl ChannelMessageHandler for ChannelManager { handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id) } - fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option, Option, Option), 
HandleError> { + fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option, Option, Option, RAACommitmentOrder), HandleError> { handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id) } @@ -2372,7 +2678,7 @@ mod tests { use chain::transaction::OutPoint; use chain::chaininterface::ChainListener; use ln::channelmanager::{ChannelManager,OnionKeys}; - use ln::channelmonitor::{CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS}; + use ln::channelmonitor::{ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS}; use ln::router::{Route, RouteHop, Router}; use ln::msgs; use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler}; @@ -2837,15 +3143,17 @@ mod tests { commitment_msg: msgs::CommitmentSigned, } impl SendEvent { + fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent { + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed } + } + fn from_event(event: Event) -> SendEvent { match event { - Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { - assert!(update_fulfill_htlcs.is_empty()); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - assert!(update_fee.is_none()); - SendEvent { node_id: node_id, msgs: update_add_htlcs, commitment_msg: commitment_signed } - }, + Event::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates), _ => panic!("Unexpected event type!"), } } @@ -3115,8 +3423,9 @@ mod tests { let events = origin_node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentFailed { payment_hash } => { + Event::PaymentFailed { payment_hash, rejected_by_dest } => { assert_eq!(payment_hash, our_payment_hash); + assert!(rejected_by_dest); }, _ => panic!("Unexpected event"), } @@ -3933,6 +4242,19 @@ mod tests { } } + macro_rules! expect_pending_htlcs_forwardable { + ($node: expr) => {{ + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + $node.node.channel_state.lock().unwrap().next_forward = Instant::now(); + $node.node.process_pending_htlc_forwards(); + }} + } + #[test] fn channel_reserve_test() { use util::rng; @@ -3965,19 +4287,6 @@ mod tests { }} }; - macro_rules! expect_pending_htlcs_forwardable { - ($node: expr) => {{ - let events = $node.node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PendingHTLCsForwardable { .. } => { }, - _ => panic!("Unexpected event"), - }; - $node.node.channel_state.lock().unwrap().next_forward = Instant::now(); - $node.node.process_pending_htlc_forwards(); - }} - }; - macro_rules! 
expect_forward { ($node: expr) => {{ let mut events = $node.node.get_and_clear_pending_events(); @@ -4756,6 +5065,7 @@ mod tests { assert!(chan_msgs.0.is_none()); } if pending_raa.0 { + assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none()); check_added_monitors!(node_a, 1); } else { @@ -4803,6 +5113,7 @@ mod tests { assert!(chan_msgs.0.is_none()); } if pending_raa.1 { + assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none()); check_added_monitors!(node_b, 1); } else { @@ -4883,8 +5194,9 @@ mod tests { _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { payment_hash } => { + Event::PaymentFailed { payment_hash, rejected_by_dest } => { assert_eq!(payment_hash, payment_hash_5); + assert!(rejected_by_dest); }, _ => panic!("Unexpected event"), } @@ -5133,6 +5445,582 @@ mod tests { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); } + #[test] + fn test_drop_messages_peer_disconnect_dual_htlc() { + // Test that we can handle reconnecting when both sides of a channel have pending + // commitment_updates when we disconnect. + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + + // Now try to send a second payment which will fail to send + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + + nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::UpdateHTLCs { .. 
} => {}, + _ => panic!("Unexpected event"), + } + + assert!(nodes[1].node.claim_funds(payment_preimage_1)); + check_added_monitors!(nodes[1], 1); + + let events_2 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap(); + let events_3 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, payment_preimage_1); + }, + _ => panic!("Unexpected event"), + } + + let (_, commitment_update) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap(); + assert!(commitment_update.is_none()); + check_added_monitors!(nodes[0], 1); + }, + _ => panic!("Unexpected event"), + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + assert_eq!(reestablish_2.len(), 1); + + let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + assert!(bs_resp.1.is_none()); + assert!(bs_resp.2.is_none()); + + assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst); + + assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1); + assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty()); + assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); + assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); + assert!(as_resp.2.as_ref().unwrap().update_fee.is_none()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap(); + let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap(); + assert!(bs_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + let bs_second_commitment_signed = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap().unwrap(); + assert!(bs_second_commitment_signed.update_add_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + + let 
as_commitment_signed = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap(); + assert!(as_commitment_signed.update_add_htlcs.is_empty()); + assert!(as_commitment_signed.update_fulfill_htlcs.is_empty()); + assert!(as_commitment_signed.update_fail_htlcs.is_empty()); + assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty()); + assert!(as_commitment_signed.update_fee.is_none()); + check_added_monitors!(nodes[0], 1); + + let (as_revoke_and_ack, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap(); + assert!(as_second_commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap(); + assert!(bs_third_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + + let events_4 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + match events_4[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + let events_5 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::PaymentReceived { ref payment_hash, amt: _ } => { + assert_eq!(payment_hash_2, *payment_hash); + }, + _ => panic!("Unexpected event"), + } + + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); + } + + #[test] + fn test_simple_monitor_permanent_update_fail() { + // Test that we handle a simple permanent monitor update failure + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), + }; + + // TODO: Once we hit the chain with the failure transaction we should check that we get a + // PaymentFailed event + + assert_eq!(nodes[0].node.list_channels().len(), 0); + } + + fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { + // Test that we can recover from a simple temporary monitor update failure optionally with + // a disconnect in between + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_events(); + assert!(events_1.is_empty()); + assert_eq!(nodes[0].node.list_channels().len(), 1); + + if disconnect { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + let mut events_2 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + let payment_event = SendEvent::from_event(events_2.pop().unwrap()); + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + expect_pending_htlcs_forwardable!(nodes[1]); + + let events_3 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(payment_hash_1, *payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + } + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); + + // Now set it to failed again... 
+ let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert!(events_4.is_empty()); + assert_eq!(nodes[0].node.list_channels().len(), 1); + + if disconnect { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + let events_5 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + + // TODO: Once we hit the chain with the failure transaction we should check that we get a + // PaymentFailed event + + assert_eq!(nodes[0].node.list_channels().len(), 0); + } + + #[test] + fn test_simple_monitor_temporary_update_fail() { + do_test_simple_monitor_temporary_update_fail(false); + do_test_simple_monitor_temporary_update_fail(true); + } + + fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { + let disconnect_flags = 8 | 16; + + // Test that we can recover from a temporary monitor update failure with some in-flight + // HTLCs going on at the same time potentially with some disconnection thrown in. + // * First we route a payment, then get a temporary monitor update failure when trying to + // route a second payment. We then claim the first payment. + // * If disconnect_count is set, we will disconnect at this point (which is likely as + // TemporaryFailure likely indicates net disconnect which resulted in failing to update + // the ChannelMonitor on a watchtower). + // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment + // immediately, otherwise we wait sconnect and deliver them via the reconnect + // channel_reestablish processing (ie disconnect_count & 16 makes no sense if + // disconnect_count & !disconnect_flags is 0). + // * We then update the channel monitor, reconnecting if disconnect_count is set and walk + // through message sending, potentially disconnect/reconnecting multiple times based on + // disconnect_count, to get the update_fulfill_htlc through. + // * We then walk through more message exchanges to get the original update_add_htlc + // through, swapping message ordering based on disconnect_count & 8 and optionally + // disconnect/reconnecting based on disconnect_count. 
+		let mut nodes = create_network(2);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+		// Now try to send a second payment which will fail to send
+		let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+		let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); }
+		check_added_monitors!(nodes[0], 1);
+
+		let events_1 = nodes[0].node.get_and_clear_pending_events();
+		assert!(events_1.is_empty());
+		assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+		// Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1]
+		// but nodes[0] won't respond since it is frozen.
+		assert!(nodes[1].node.claim_funds(payment_preimage_1));
+		check_added_monitors!(nodes[1], 1);
+		let events_2 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_2.len(), 1);
+		let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
+			Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				assert!(update_add_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert!(update_fee.is_none());
+
+				if (disconnect_count & 16) == 0 {
+					nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
+					let events_3 = nodes[0].node.get_and_clear_pending_events();
+					assert_eq!(events_3.len(), 1);
+					match events_3[0] {
+						Event::PaymentSent { ref payment_preimage } => {
+							assert_eq!(*payment_preimage, payment_preimage_1);
+						},
+						_ => panic!("Unexpected event"),
+					}
+
+					if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
+						assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+					} else { panic!(); }
+				}
+
+				(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
+			},
+			_ => panic!("Unexpected event"),
+		};
+
+		if disconnect_count & !disconnect_flags > 0 {
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+			nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		}
+
+		// Now fix monitor updating...
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[0].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[0], 1);
+
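+		// Helper: drop the peer connection, reconnect via peer_connected, and run the
+		// channel_reestablish exchange on both sides, returning both reestablish messages
+		// and each node's response tuple.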
+		macro_rules! disconnect_reconnect_peers { () => { {
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+			nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+			let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+			assert_eq!(reestablish_1.len(), 1);
+			let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+			assert_eq!(reestablish_2.len(), 1);
+
+			let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+			let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+
+			assert!(as_resp.0.is_none());
+			assert!(bs_resp.0.is_none());
+
+			(reestablish_1, reestablish_2, as_resp, bs_resp)
+		} } }
+
+		let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
+			let events_4 = nodes[0].node.get_and_clear_pending_events();
+			assert!(events_4.is_empty());
+
+			let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+			assert_eq!(reestablish_1.len(), 1);
+			let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+			assert_eq!(reestablish_2.len(), 1);
+
+			let mut as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+			check_added_monitors!(nodes[0], 0);
+			let mut bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+			check_added_monitors!(nodes[1], 0);
+
+			assert!(as_resp.0.is_none());
+			assert!(bs_resp.0.is_none());
+
+			assert!(bs_resp.1.is_none());
+			if (disconnect_count & 16) == 0 {
+				assert!(bs_resp.2.is_none());
+
+				assert!(as_resp.1.is_some());
+				assert!(as_resp.2.is_some());
+				assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst);
+			} else {
+				assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
+				assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
+				assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
+				assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
+				assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
+				assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
+
+				assert!(as_resp.1.is_none());
+
+				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap();
+				let events_3 = nodes[0].node.get_and_clear_pending_events();
+				assert_eq!(events_3.len(), 1);
+				match events_3[0] {
+					Event::PaymentSent { ref payment_preimage } => {
+						assert_eq!(*payment_preimage, payment_preimage_1);
+					},
+					_ => panic!("Unexpected event"),
+				}
+
+				let (as_resp_raa, as_resp_cu) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap();
+				assert!(as_resp_cu.is_none());
+				check_added_monitors!(nodes[0], 1);
+
+				as_resp.1 = Some(as_resp_raa);
+				bs_resp.2 = None;
+			}
+
+			if disconnect_count & !disconnect_flags > 1 {
+				let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
+
+				if (disconnect_count & 16) == 0 {
+					assert!(reestablish_1 == second_reestablish_1);
+					assert!(reestablish_2 == second_reestablish_2);
+				}
+				assert!(as_resp == second_as_resp);
+				assert!(bs_resp == second_bs_resp);
+			}
+
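+			// Rebuild the pending update_add_htlc SendEvent and the revoke_and_ack we owe
+			// nodes[1] from the channel_reestablish responses above.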
+			(SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
+		} else {
+			let mut events_4 = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events_4.len(), 2);
+			(SendEvent::from_event(events_4.remove(0)), match events_4[0] {
+				Event::SendRevokeAndACK { ref node_id, ref msg } => {
+					assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+					msg.clone()
+				},
+				_ => panic!("Unexpected event"),
+			})
+		};
+
+		assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+		let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+		assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting an RAA from nodes[0] still
+		check_added_monitors!(nodes[1], 1);
+
+		if disconnect_count & !disconnect_flags > 2 {
+			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+			assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
+			assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
+
+			assert!(as_resp.2.is_none());
+			assert!(bs_resp.2.is_none());
+		}
+
+		let as_commitment_update;
+		let bs_second_commitment_update;
+
+		macro_rules! handle_bs_raa { () => {
+			as_commitment_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap();
+			assert!(as_commitment_update.update_add_htlcs.is_empty());
+			assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
+			assert!(as_commitment_update.update_fail_htlcs.is_empty());
+			assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
+			assert!(as_commitment_update.update_fee.is_none());
+			check_added_monitors!(nodes[0], 1);
+		} }
+
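+		// Deliver nodes[0]'s pending revoke_and_ack to nodes[1] and capture the commitment
+		// update nodes[1] generates in response.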
+		macro_rules! handle_initial_raa { () => {
+			bs_second_commitment_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap().unwrap();
+			assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
+			assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
+			assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
+			assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
+			assert!(bs_second_commitment_update.update_fee.is_none());
+			check_added_monitors!(nodes[1], 1);
+		} }
+
+		if (disconnect_count & 8) == 0 {
+			handle_bs_raa!();
+
+			if disconnect_count & !disconnect_flags > 3 {
+				let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+				assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
+				assert!(bs_resp.1.is_none());
+
+				assert!(as_resp.2.unwrap() == as_commitment_update);
+				assert!(bs_resp.2.is_none());
+
+				assert!(as_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
+			}
+
+			handle_initial_raa!();
+
+			if disconnect_count & !disconnect_flags > 4 {
+				let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+				assert!(as_resp.1.is_none());
+				assert!(bs_resp.1.is_none());
+
+				assert!(as_resp.2.unwrap() == as_commitment_update);
+				assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
+			}
+		} else {
+			handle_initial_raa!();
+
+			if disconnect_count & !disconnect_flags > 3 {
+				let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+				assert!(as_resp.1.is_none());
+				assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
+
+				assert!(as_resp.2.is_none());
+				assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
+
+				assert!(bs_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
+			}
+
+			handle_bs_raa!();
+
+			if disconnect_count & !disconnect_flags > 4 {
+				let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+				assert!(as_resp.1.is_none());
+				assert!(bs_resp.1.is_none());
+
+				assert!(as_resp.2.unwrap() == as_commitment_update);
+				assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
+			}
+		}
+
+		let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap();
+		assert!(as_commitment_signed.is_none());
+		check_added_monitors!(nodes[0], 1);
+
+		let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap();
+		assert!(bs_third_commitment_signed.is_none());
+		check_added_monitors!(nodes[1], 1);
+
+		assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+		check_added_monitors!(nodes[1], 1);
+
+		assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none());
+		check_added_monitors!(nodes[0], 1);
+
+		expect_pending_htlcs_forwardable!(nodes[1]);
+
+		let events_5 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_5.len(), 1);
+		match events_5[0] {
+			Event::PaymentReceived { ref payment_hash, amt } => {
+				assert_eq!(payment_hash_2, *payment_hash);
+				assert_eq!(amt, 1000000);
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	}
+
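+	// The _a/_b/_c variants below sweep disconnect_count: plain disconnect counts, counts
+	// with bit 8 set (the two revoke_and_acks delivered in the opposite order), and counts
+	// with bit 16 set (the initial update_fulfill_htlc/CS delivered via channel_reestablish
+	// rather than immediately).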
+	#[test]
+	fn test_monitor_temporary_update_fail_a() {
+		do_test_monitor_temporary_update_fail(0);
+		do_test_monitor_temporary_update_fail(1);
+		do_test_monitor_temporary_update_fail(2);
+		do_test_monitor_temporary_update_fail(3);
+		do_test_monitor_temporary_update_fail(4);
+		do_test_monitor_temporary_update_fail(5);
+	}
+
+	#[test]
+	fn test_monitor_temporary_update_fail_b() {
+		do_test_monitor_temporary_update_fail(2 | 8);
+		do_test_monitor_temporary_update_fail(3 | 8);
+		do_test_monitor_temporary_update_fail(4 | 8);
+		do_test_monitor_temporary_update_fail(5 | 8);
+	}
+
+	#[test]
+	fn test_monitor_temporary_update_fail_c() {
+		do_test_monitor_temporary_update_fail(1 | 16);
+		do_test_monitor_temporary_update_fail(2 | 16);
+		do_test_monitor_temporary_update_fail(3 | 16);
+		do_test_monitor_temporary_update_fail(2 | 8 | 16);
+		do_test_monitor_temporary_update_fail(3 | 8 | 16);
+	}
+
 	#[test]
 	fn test_invalid_channel_announcement() {
 		//Test BOLT 7 channel_announcement msg requirement for final node, gather data to build custom channel_announcement msgs