X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=c71013f950d9509f318ee21fd851ab582f3d5644;hb=f1eb4639f860bec63d3e6a311a9128d56d9b2803;hp=774fdf6aa6ae72339814f1cb5d3bae1bfe4c9c47;hpb=13b80ce8056684c093ba98ab83f4d431ab543ceb;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 774fdf6a..c71013f9 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -22,11 +22,12 @@ use secp256k1;
 
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
-use ln::channel::{Channel, ChannelError, ChannelKeys};
-use ln::channelmonitor::{ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
+use ln::channel::{Channel, ChannelError};
+use ln::channelmonitor::{ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
 use ln::router::{Route,RouteHop};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, HandleError, RAACommitmentOrder};
+use chain::keysinterface::KeysInterface;
 use util::{byte_utils, events, internal_traits, rng};
 use util::sha2::Sha256;
 use util::ser::{Readable, Writeable};
@@ -221,6 +222,16 @@ impl MsgHandleErrInternal {
 	}
 }
 
+/// Passed to fail_htlc_backwards to indicate the reason to fail the payment
+/// after a PaymentReceived event.
+#[derive(PartialEq)]
+pub enum PaymentFailReason {
+	/// Indicates the preimage for payment_hash is not known after a PaymentReceived event
+	PreimageUnknown,
+	/// Indicates the payment amount is incorrect (received is < expected or > 2*expected) after a PaymentReceived event
+	AmountMismatch,
+}
+
 /// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
 /// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
 /// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
@@ -291,6 +302,8 @@ pub struct ChannelManager {
 
 	pending_events: Mutex<Vec<events::Event>>,
 
+	keys_manager: Arc<KeysInterface>,
+
 	logger: Arc<Logger>,
 }
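The PaymentFailReason variants added above map onto the BOLT 4 failure codes used further down in this diff: 0x4000|15 (unknown_payment_hash) for PreimageUnknown and 0x4000|16 (incorrect_payment_amount) for AmountMismatch. As a rough sketch of the intended call pattern on the payee side, assuming a ChannelManager handle and a hypothetical lookup_preimage invoice-store helper that is not part of this change:

	// Hypothetical invoice store lookup: returns (preimage, expected amount msat).
	fn lookup_preimage(_payment_hash: &[u8; 32]) -> Option<([u8; 32], u64)> {
		unimplemented!() // application-specific
	}

	fn handle_payment_received(node: &ChannelManager, payment_hash: [u8; 32], amt: u64) {
		match lookup_preimage(&payment_hash) {
			None => {
				// No invoice for this hash: fail back with unknown_payment_hash.
				node.fail_htlc_backwards(&payment_hash, PaymentFailReason::PreimageUnknown);
			},
			Some((payment_preimage, expected_amt)) => {
				if amt < expected_amt || amt > expected_amt * 2 {
					// Amount outside the tolerated range: incorrect_payment_amount.
					node.fail_htlc_backwards(&payment_hash, PaymentFailReason::AmountMismatch);
				} else {
					node.claim_funds(payment_preimage);
				}
			},
		}
	}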
@@ -363,7 +376,7 @@ impl ChannelManager {
 	/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
 	///
 	/// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
-	pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
+	pub fn new(fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, keys_manager: Arc<KeysInterface>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
 		let secp_ctx = Secp256k1::new();
 
 		let res = Arc::new(ChannelManager {
@@ -385,10 +398,12 @@ impl ChannelManager {
 				forward_htlcs: HashMap::new(),
 				claimable_htlcs: HashMap::new(),
 			}),
-			our_network_key,
+			our_network_key: keys_manager.get_node_secret(),
 
 			pending_events: Mutex::new(Vec::new()),
 
+			keys_manager,
+
 			logger,
 		});
 		let weak_res = Arc::downgrade(&res);
@@ -408,27 +423,7 @@ impl ChannelManager {
 	///
 	/// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is greater than channel_value_satoshis * 1k
 	pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
-		let chan_keys = if cfg!(feature = "fuzztarget") {
-			ChannelKeys {
-				funding_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-				revocation_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-				payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-				delayed_payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-				htlc_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-				channel_close_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-				channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-				commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
-			}
-		} else {
-			let mut key_seed = [0u8; 32];
-			rng::fill_bytes(&mut key_seed);
-			match ChannelKeys::new_from_seed(&key_seed) {
-				Ok(key) => key,
-				Err(_) => panic!("RNG is busted!")
-			}
-		};
-
-		let channel = Channel::new_outbound(&*self.fee_estimator, chan_keys, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?;
+		let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?;
 		let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
 		let mut channel_state = self.channel_state.lock().unwrap();
 		match channel_state.by_id.entry(channel.channel_id()) {
@@ -586,6 +581,33 @@ impl ChannelManager {
 		}
 	}
 
+	fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) {
+		match err {
+
ChannelMonitorUpdateErr::PermanentFailure => { + let mut chan = { + let channel_state = channel_state_lock.borrow_parts(); + let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!"); + if let Some(short_id) = chan.get_short_channel_id() { + channel_state.short_to_id.remove(&short_id); + } + chan + }; + mem::drop(channel_state_lock); + self.finish_force_close_channel(chan.force_shutdown()); + let mut events = self.pending_events.lock().unwrap(); + if let Ok(update) = self.get_channel_update(&chan) { + events.push(events::Event::BroadcastChannelUpdate { + msg: update + }); + } + }, + ChannelMonitorUpdateErr::TemporaryFailure => { + let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!"); + channel.monitor_update_failed(reason); + }, + } + } + #[inline] fn gen_rho_mu_from_shared_secret(shared_secret: &SharedSecret) -> ([u8; 32], [u8; 32]) { ({ @@ -984,6 +1006,11 @@ impl ChannelManager { if let Some((err, code, chan_update)) = loop { let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); + // Note that we could technically not return an error yet here and just hope + // that the connection is reestablished or monitor updated by the time we get + // around to doing the actual forward, but better to fail early if we can and + // hopefully an attacker trying to path-trace payments cannot make this occur + // on a small/per-node/per-channel scale. if !chan.is_live() { // channel_disabled break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap()))); } @@ -1027,6 +1054,7 @@ impl ChannelManager { } /// only fails if the channel does not yet have an assigned short_id + /// May be called with channel_state already locked! 
 	fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
 		let short_channel_id = match chan.get_short_channel_id() {
 			None => return Err(HandleError{err: "Channel not yet established", action: None}),
@@ -1096,9 +1124,8 @@ impl ChannelManager {
 		let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
 		let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
 
-		let (first_hop_node_id, (update_add, commitment_signed, chan_monitor)) = {
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = channel_state_lock.borrow_parts();
+		let (first_hop_node_id, update_add, commitment_signed) = {
+			let mut channel_state = self.channel_state.lock().unwrap();
 
 			let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
 				None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
@@ -1106,32 +1133,45 @@ impl ChannelManager {
 			};
 
 			let res = {
-				let chan = channel_state.by_id.get_mut(&id).unwrap();
-				if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
-					return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
-				}
-				if !chan.is_live() {
-					return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
+				let res = {
+					let chan = channel_state.by_id.get_mut(&id).unwrap();
+					if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
+						return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
+					}
+					if chan.is_awaiting_monitor_update() {
+						return Err(APIError::MonitorUpdateFailed);
+					}
+					if !chan.is_live() {
+						return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
+					}
+					chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+						route: route.clone(),
+						session_priv: session_priv.clone(),
+						first_hop_htlc_msat: htlc_msat,
+					}, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
+				};
+				match res {
+					Some((update_add, commitment_signed, chan_monitor)) => {
+						if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+							self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst);
+							return Err(APIError::MonitorUpdateFailed);
+						}
+						Some((update_add, commitment_signed))
+					},
+					None => None,
 				}
-				chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-					route: route.clone(),
-					session_priv: session_priv.clone(),
-					first_hop_htlc_msat: htlc_msat,
-				}, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
 			};
 
 			let first_hop_node_id = route.hops.first().unwrap().pubkey;
 
 			match res {
-				Some(msgs) => (first_hop_node_id, msgs),
+				Some((update_add, commitment_signed)) => {
+					(first_hop_node_id, update_add, commitment_signed)
+				},
 				None => return Ok(()),
 			}
 		};
 
-		if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-			unimplemented!();
-		}
-
 		let mut events = self.pending_events.lock().unwrap();
 		events.push(events::Event::UpdateHTLCs {
 			node_id: first_hop_node_id,
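With this change send_payment grows a new failure mode: APIError::MonitorUpdateFailed means the HTLC may already have been committed to the channel but the monitor write did not complete, so the payment must not be retried over another route. A sketch of caller-side handling; the comments describe behaviour exercised by the tests added below, and the non-monitor error arms are only illustrative:

	fn send_with_monitor_awareness(node: &ChannelManager, route: Route, payment_hash: [u8; 32]) {
		match node.send_payment(route, payment_hash) {
			Ok(()) => {
				// An UpdateHTLCs event for the first hop is queued as usual.
			},
			Err(APIError::MonitorUpdateFailed) => {
				// The channel is frozen awaiting a monitor update; once the
				// monitor store recovers, restoring the channel monitor resumes
				// this send. Retrying over another route could double-pay.
			},
			Err(APIError::RouteError { err }) | Err(APIError::ChannelUnavailable { err }) => {
				// Nothing was committed; trying a different route is safe.
				println!("payment failed: {}", err);
			},
			Err(_) => { /* remaining variants indicate API misuse */ },
		}
	}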
@@ -1184,7 +1224,9 @@ impl ChannelManager {
 				},
 				None => return
 			}
-		}; // Release channel lock for install_watch_outpoint call,
+		};
+		// Because we have exclusive ownership of the channel here we can release the channel_state
+		// lock before add_update_monitor
 		if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 			unimplemented!();
 		}
@@ -1299,7 +1341,10 @@ impl ChannelManager {
 							continue;
 						},
 					};
+					if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+						unimplemented!();// but def dont push the event...
+					}
-					new_events.push((Some(monitor), events::Event::UpdateHTLCs {
+					new_events.push(events::Event::UpdateHTLCs {
 						node_id: forward_chan.get_their_node_id(),
 						updates: msgs::CommitmentUpdate {
 							update_add_htlcs: add_htlc_msgs,
@@ -1309,7 +1354,7 @@ impl ChannelManager {
 							update_fee: None,
 							commitment_signed: commitment_msg,
 						},
-					}));
+					});
 				}
 			} else {
 				for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
@@ -1322,10 +1367,10 @@ impl ChannelManager {
 						hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
 						hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
 					};
-					new_events.push((None, events::Event::PaymentReceived {
+					new_events.push(events::Event::PaymentReceived {
 						payment_hash: forward_info.payment_hash,
 						amt: forward_info.amt_to_forward,
-					}));
+					});
 				}
 			}
 		}
@@ -1339,33 +1384,18 @@ impl ChannelManager {
 		}
 
 		if new_events.is_empty() { return }
-
-		new_events.retain(|event| {
-			if let &Some(ref monitor) = &event.0 {
-				if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor.clone()) {
-					unimplemented!();// but def dont push the event...
-				}
-			}
-			true
-		});
-
 		let mut events = self.pending_events.lock().unwrap();
-		events.reserve(new_events.len());
-		for event in new_events.drain(..) {
-			events.push(event.1);
-		}
+		events.append(&mut new_events);
 	}
 
-	/// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event.
-	pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> bool {
-		// TODO: Add ability to return 0x4000|16 (incorrect_payment_amount) if the amount we
-		// received is < expected or > 2*expected
+	/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect after a PaymentReceived event.
+	pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32], reason: PaymentFailReason) -> bool {
 		let mut channel_state = Some(self.channel_state.lock().unwrap());
 		let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
 		if let Some(mut sources) = removed_source {
 			for htlc_with_hash in sources.drain(..)
{ if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } - self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: Vec::new() }); + self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, HTLCFailReason::Reason { failure_code: if reason == PaymentFailReason::PreimageUnknown {0x4000 | 15} else {0x4000 | 16}, data: Vec::new() }); } true } else { false } @@ -1416,7 +1446,13 @@ impl ChannelManager { let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) { - Ok(msg) => (chan.get_their_node_id(), msg), + Ok(Some((msg, commitment_msg, chan_monitor))) => { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + (chan.get_their_node_id(), Some((msg, commitment_msg))) + }, + Ok(None) => (chan.get_their_node_id(), None), Err(_e) => { //TODO: Do something with e? return; @@ -1425,13 +1461,9 @@ impl ChannelManager { }; match fail_msgs { - Some((msg, commitment_msg, chan_monitor)) => { + Some((msg, commitment_msg)) => { mem::drop(channel_state); - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!();// but def dont push the event... - } - let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(events::Event::UpdateHTLCs { node_id, @@ -1496,7 +1528,13 @@ impl ChannelManager { let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) { - Ok(msg) => (chan.get_their_node_id(), msg), + Ok((msgs, Some(chan_monitor))) => { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!();// but def dont push the event... + } + (chan.get_their_node_id(), msgs) + }, + Ok((msgs, None)) => (chan.get_their_node_id(), msgs), Err(_e) => { // TODO: There is probably a channel manager somewhere that needs to // learn the preimage as the channel may be about to hit the chain. @@ -1507,13 +1545,7 @@ impl ChannelManager { }; mem::drop(channel_state); - if let Some(chan_monitor) = fulfill_msgs.1 { - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!();// but def dont push the event... - } - } - - if let Some((msg, commitment_msg)) = fulfill_msgs.0 { + if let Some((msg, commitment_msg)) = fulfill_msgs { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(events::Event::UpdateHTLCs { node_id: node_id, @@ -1540,7 +1572,83 @@ impl ChannelManager { /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update /// operation. 
pub fn test_restore_channel_monitor(&self) { - unimplemented!(); + let mut new_events = Vec::new(); + let mut close_results = Vec::new(); + let mut htlc_forwards = Vec::new(); + let mut htlc_failures = Vec::new(); + + { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + let short_to_id = channel_state.short_to_id; + channel_state.by_id.retain(|_, channel| { + if channel.is_awaiting_monitor_update() { + let chan_monitor = channel.channel_monitor(); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + match e { + ChannelMonitorUpdateErr::PermanentFailure => { + if let Some(short_id) = channel.get_short_channel_id() { + short_to_id.remove(&short_id); + } + close_results.push(channel.force_shutdown()); + if let Ok(update) = self.get_channel_update(&channel) { + new_events.push(events::Event::BroadcastChannelUpdate { + msg: update + }); + } + false + }, + ChannelMonitorUpdateErr::TemporaryFailure => true, + } + } else { + let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored(); + if !pending_forwards.is_empty() { + htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards)); + } + htlc_failures.append(&mut pending_failures); + + macro_rules! handle_cs { () => { + if let Some(update) = commitment_update { + new_events.push(events::Event::UpdateHTLCs { + node_id: channel.get_their_node_id(), + updates: update, + }); + } + } } + macro_rules! handle_raa { () => { + if let Some(revoke_and_ack) = raa { + new_events.push(events::Event::SendRevokeAndACK { + node_id: channel.get_their_node_id(), + msg: revoke_and_ack, + }); + } + } } + match order { + RAACommitmentOrder::CommitmentFirst => { + handle_cs!(); + handle_raa!(); + }, + RAACommitmentOrder::RevokeAndACKFirst => { + handle_raa!(); + handle_cs!(); + }, + } + true + } + } else { true } + }); + } + + for failure in htlc_failures.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); + } + self.forward_htlcs(&mut htlc_forwards[..]); + + for res in close_results.drain(..) 
{
+			self.finish_force_close_channel(res);
+		}
+
+		self.pending_events.lock().unwrap().append(&mut new_events);
 	}
 
 	fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, MsgHandleErrInternal> {
@@ -1552,27 +1660,7 @@ impl ChannelManager {
 			return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone()));
 		}
 
-		let chan_keys = if cfg!(feature = "fuzztarget") {
-			ChannelKeys {
-				funding_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]).unwrap(),
-				revocation_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0]).unwrap(),
-				payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0]).unwrap(),
-				delayed_payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0]).unwrap(),
-				htlc_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0]).unwrap(),
-				channel_close_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0]).unwrap(),
-				channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0]).unwrap(),
-				commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
-			}
-		} else {
-			let mut key_seed = [0u8; 32];
-			rng::fill_bytes(&mut key_seed);
-			match ChannelKeys::new_from_seed(&key_seed) {
-				Ok(key) => key,
-				Err(_) => panic!("RNG is busted!")
-			}
-		};
-
-		let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger))
+		let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger))
 			.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
 		let accept_msg = channel.get_accept_channel();
 		channel_state.by_id.insert(channel.channel_id(), channel);
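Both Channel constructors now draw their secrets from the KeysInterface rather than building a ChannelKeys from raw RNG output (or the all-zero fuzzing keys removed above). For reference, wiring one up looks roughly like the sketch below; the KeysManager::new signature is taken from the test-harness changes further down, and the surrounding Arc'd trait objects are assumed to be the usual ones:

	fn build_node(logger: Arc<Logger>, fee_estimator: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>) -> Arc<ChannelManager> {
		// In production the seed should come from a secure RNG and be backed up:
		// it deterministically derives the node secret and all channel keys.
		let mut seed = [0u8; 32];
		rng::fill_bytes(&mut seed);
		let keys_manager: Arc<KeysInterface> = Arc::new(keysinterface::KeysManager::new(&seed, Network::Testnet, Arc::clone(&logger)));
		// ChannelManager::new no longer takes our_network_key; it asks the
		// KeysInterface via get_node_secret() instead.
		ChannelManager::new(0, true, Network::Testnet, fee_estimator, monitor, chain_monitor, tx_broadcaster, logger, keys_manager).unwrap()
	}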
@@ -1626,10 +1714,9 @@ impl ChannelManager {
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
 		}
-		}; // Release channel lock for install_watch_outpoint call,
-		   // note that this means if the remote end is misbehaving and sends a message for the same
-		   // channel back-to-back with funding_created, we'll end up thinking they sent a message
-		   // for a bogus channel.
+		};
+		// Because we have exclusive ownership of the channel here we can release the channel_state
+		// lock before add_update_monitor
 		if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
 			unimplemented!();
 		}
@@ -1646,7 +1733,7 @@ impl ChannelManager {
 	}
 
 	fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
-		let (funding_txo, user_id, monitor) = {
+		let (funding_txo, user_id) = {
 			let mut channel_state = self.channel_state.lock().unwrap();
 			match channel_state.by_id.get_mut(&msg.channel_id) {
 				Some(chan) => {
@@ -1655,14 +1742,14 @@ impl ChannelManager {
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 					}
 					let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
-					(chan.get_funding_txo().unwrap(), chan.get_user_id(), chan_monitor)
+					if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+						unimplemented!();
+					}
+					(chan.get_funding_txo().unwrap(), chan.get_user_id())
 				},
 				None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 			}
 		};
-		if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-			unimplemented!();
-		}
 		let mut pending_events = self.pending_events.lock().unwrap();
 		pending_events.push(events::Event::FundingBroadcastSafe {
 			funding_txo: funding_txo,
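internal_funding_signed now registers the monitor before queueing FundingBroadcastSafe, preserving that event's contract: the funding transaction may only be broadcast once its ChannelMonitor is in place. On the consumer side the handling might look like this sketch, where pending_funding is a hypothetical map from funding outpoint to the not-yet-broadcast bitcoin::blockdata::transaction::Transaction:

	use std::collections::HashMap;

	fn on_funding_broadcast_safe(event: events::Event, pending_funding: &mut HashMap<OutPoint, Transaction>, broadcaster: &BroadcasterInterface) {
		if let events::Event::FundingBroadcastSafe { funding_txo, .. } = event {
			// The monitor is registered, so a unilateral close can now be
			// detected and reacted to; broadcasting is safe.
			if let Some(tx) = pending_funding.remove(&funding_txo) {
				broadcaster.broadcast_transaction(&tx);
			}
		}
	}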
@@ -2042,7 +2129,7 @@ impl ChannelManager {
 	}
 
 	fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), MsgHandleErrInternal> {
-		let (revoke_and_ack, commitment_signed, chan_monitor) = {
+		let (revoke_and_ack, commitment_signed) = {
 			let mut channel_state = self.channel_state.lock().unwrap();
 			match channel_state.by_id.get_mut(&msg.channel_id) {
 				Some(chan) => {
@@ -2050,20 +2137,53 @@ impl ChannelManager {
 						//TODO: here and below MsgHandleErrInternal, #153 case
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 					}
-					chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?
+					let (revoke_and_ack, commitment_signed, chan_monitor) = chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+					if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+						unimplemented!();
+					}
+					(revoke_and_ack, commitment_signed)
 				},
 				None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 			}
 		};
-		if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-			unimplemented!();
-		}
-
 		Ok((revoke_and_ack, commitment_signed))
 	}
 
+	#[inline]
+	fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
+		for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+			let mut forward_event = None;
+			if !pending_forwards.is_empty() {
+				let mut channel_state = self.channel_state.lock().unwrap();
+				if channel_state.forward_htlcs.is_empty() {
+					forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
+					channel_state.next_forward = forward_event.unwrap();
+				}
+				for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
+					match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
+						hash_map::Entry::Occupied(mut entry) => {
+							entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info });
+						},
+						hash_map::Entry::Vacant(entry) => {
+							entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info }));
+						}
+					}
+				}
+			}
+			match forward_event {
+				Some(time) => {
+					let mut pending_events = self.pending_events.lock().unwrap();
+					pending_events.push(events::Event::PendingHTLCsForwardable {
+						time_forwardable: time
+					});
+				}
+				None => {},
+			}
+		}
+	}
+
 	fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, MsgHandleErrInternal> {
-		let ((res, mut pending_forwards, mut pending_failures, chan_monitor), short_channel_id) = {
+		let ((res, pending_forwards, mut pending_failures), short_channel_id) = {
 			let mut channel_state = self.channel_state.lock().unwrap();
 			match channel_state.by_id.get_mut(&msg.channel_id) {
 				Some(chan) => {
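The deadline arithmetic in the new forward_htlcs helper is the same expression the removed inline code used, implementing the MIN_HTLC_RELAY_HOLDING_CELL_MILLIS comment near the top of the file: the first forward queued into an empty holding cell schedules a wake-up a uniformly random 1x-5x multiple of the constant in the future. The arithmetic in isolation (the constant's value here is chosen only for illustration, and rand_f32 stands in for util::rng::rand_f32):

	use std::time::{Duration, Instant};

	const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 50; // illustrative value

	// rand_f32 must return a value in [0, 1), as util::rng::rand_f32 does.
	fn forward_deadline(now: Instant, rand_f32: f32) -> Instant {
		// rand * 4 + 1 maps [0, 1) onto [1, 5), so the hold time is between one
		// and just under five times the minimum holding-cell delay.
		now + Duration::from_millis(((rand_f32 * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64)
	}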
@@ -2071,45 +2191,19 @@ impl ChannelManager {
 						//TODO: here and below MsgHandleErrInternal, #153 case
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 					}
-					(chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
+					let (res, pending_forwards, pending_failures, chan_monitor) = chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+					if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+						unimplemented!();
+					}
+					((res, pending_forwards, pending_failures), chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
 				},
 				None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 			}
 		};
-		if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-			unimplemented!();
-		}
 		for failure in pending_failures.drain(..) {
 			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
 		}
-
-		let mut forward_event = None;
-		if !pending_forwards.is_empty() {
-			let mut channel_state = self.channel_state.lock().unwrap();
-			if channel_state.forward_htlcs.is_empty() {
-				forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
-				channel_state.next_forward = forward_event.unwrap();
-			}
-			for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
-				match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
-					hash_map::Entry::Occupied(mut entry) => {
-						entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info });
-					},
-					hash_map::Entry::Vacant(entry) => {
-						entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info }));
-					}
-				}
-			}
-		}
-		match forward_event {
-			Some(time) => {
-				let mut pending_events = self.pending_events.lock().unwrap();
-				pending_events.push(events::Event::PendingHTLCsForwardable {
-					time_forwardable: time
-				});
-			}
-			None => {},
-		}
+		self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
 
 		Ok(res)
 	}
 
@@ -2169,7 +2263,7 @@ impl ChannelManager {
 	}
 
 	fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder), MsgHandleErrInternal> {
-		let (res, chan_monitor) = {
+		let res = {
 			let mut channel_state = self.channel_state.lock().unwrap();
 			match channel_state.by_id.get_mut(&msg.channel_id) {
 				Some(chan) => {
@@ -2178,16 +2272,17 @@ impl ChannelManager {
 					}
 					let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order) = chan.channel_reestablish(msg)
 						.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
-					(Ok((funding_locked, revoke_and_ack, commitment_update, order)), channel_monitor)
+					if let Some(monitor) = channel_monitor {
+						if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+							unimplemented!();
+						}
+					}
+					Ok((funding_locked, revoke_and_ack, commitment_update, order))
 				},
 				None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 			}
 		};
-		if let Some(monitor) = chan_monitor {
-			if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-				unimplemented!();
-			}
-		}
+
 		res
 	}
 
@@ -2204,6 +2299,9 @@ impl ChannelManager {
 			if !chan.is_outbound() {
 				return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
 			}
+			if chan.is_awaiting_monitor_update() {
+				return Err(APIError::MonitorUpdateFailed);
+			}
 			if !chan.is_live() {
 				return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
 			}
@@ -2552,8 +2650,10 @@ mod tests {
 	use chain::chaininterface;
 	use chain::transaction::OutPoint;
 	use chain::chaininterface::ChainListener;
-	use ln::channelmanager::{ChannelManager,OnionKeys};
-	use ln::channelmonitor::{CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
+	use chain::keysinterface::KeysInterface;
+	use chain::keysinterface;
+	use ln::channelmanager::{ChannelManager,OnionKeys,PaymentFailReason};
+	use ln::channelmonitor::{ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
 	use ln::router::{Route, RouteHop, Router};
 	use ln::msgs;
 	use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
@@ 
-3018,15 +3118,17 @@ mod tests { commitment_msg: msgs::CommitmentSigned, } impl SendEvent { + fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent { + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed } + } + fn from_event(event: Event) -> SendEvent { match event { - Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { - assert!(update_fulfill_htlcs.is_empty()); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - assert!(update_fee.is_none()); - SendEvent { node_id: node_id, msgs: update_add_htlcs, commitment_msg: commitment_signed } - }, + Event::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates), _ => panic!("Unexpected event type!"), } } @@ -3241,7 +3343,7 @@ mod tests { } fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) { - assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash)); + assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash, PaymentFailReason::PreimageUnknown)); check_added_monitors!(expected_route.last().unwrap(), 1); let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None; @@ -3322,14 +3424,12 @@ mod tests { let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }); let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger))); let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())}); + let mut seed = [0; 32]; + rng.fill_bytes(&mut seed); + let keys_manager = Arc::new(keysinterface::KeysManager::new(&seed, Network::Testnet, Arc::clone(&logger))); let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone())); - let node_id = { - let mut key_slice = [0; 32]; - rng.fill_bytes(&mut key_slice); - SecretKey::from_slice(&secp_ctx, &key_slice).unwrap() - }; - let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger)).unwrap(); - let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id), chain_monitor.clone(), Arc::clone(&logger)); + let node = ChannelManager::new(0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone()).unwrap(); + let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), chain_monitor.clone(), Arc::clone(&logger)); nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router, network_payment_count: payment_count.clone(), network_chan_count: chan_count.clone(), @@ -4115,6 +4215,19 @@ mod tests { } } + macro_rules! expect_pending_htlcs_forwardable { + ($node: expr) => {{ + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + $node.node.channel_state.lock().unwrap().next_forward = Instant::now(); + $node.node.process_pending_htlc_forwards(); + }} + } + #[test] fn channel_reserve_test() { use util::rng; @@ -4147,19 +4260,6 @@ mod tests { }} }; - macro_rules! expect_pending_htlcs_forwardable { - ($node: expr) => {{ - let events = $node.node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PendingHTLCsForwardable { .. } => { }, - _ => panic!("Unexpected event"), - }; - $node.node.channel_state.lock().unwrap().next_forward = Instant::now(); - $node.node.process_pending_htlc_forwards(); - }} - }; - macro_rules! expect_forward { ($node: expr) => {{ let mut events = $node.node.get_and_clear_pending_events(); @@ -5453,6 +5553,447 @@ mod tests { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } + #[test] + fn test_simple_monitor_permanent_update_fail() { + // Test that we handle a simple permanent monitor update failure + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + }; + + // TODO: Once we hit the chain with the failure transaction we should check that we get a + // PaymentFailed event + + assert_eq!(nodes[0].node.list_channels().len(), 0); + } + + fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { + // Test that we can recover from a simple temporary monitor update failure optionally with + // a disconnect in between + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_events(); + assert!(events_1.is_empty()); + assert_eq!(nodes[0].node.list_channels().len(), 1); + + if disconnect { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + let mut events_2 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + let payment_event = SendEvent::from_event(events_2.pop().unwrap()); + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + 
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+		expect_pending_htlcs_forwardable!(nodes[1]);
+
+		let events_3 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_3.len(), 1);
+		match events_3[0] {
+			Event::PaymentReceived { ref payment_hash, amt } => {
+				assert_eq!(payment_hash_1, *payment_hash);
+				assert_eq!(amt, 1000000);
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+
+		// Now set it to failed again...
+		let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); }
+		check_added_monitors!(nodes[0], 1);
+
+		let events_4 = nodes[0].node.get_and_clear_pending_events();
+		assert!(events_4.is_empty());
+		assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+		if disconnect {
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+			nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+			reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		}
+
+		// ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
+		nodes[0].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[0], 1);
+
+		let events_5 = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events_5.len(), 1);
+		match events_5[0] {
+			Event::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+
+		// TODO: Once we hit the chain with the failure transaction we should check that we get a
+		// PaymentFailed event
+
+		assert_eq!(nodes[0].node.list_channels().len(), 0);
+	}
+
+	#[test]
+	fn test_simple_monitor_temporary_update_fail() {
+		do_test_simple_monitor_temporary_update_fail(false);
+		do_test_simple_monitor_temporary_update_fail(true);
+	}
+
+	fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
+		let disconnect_flags = 8 | 16;
+
+		// Test that we can recover from a temporary monitor update failure with some in-flight
+		// HTLCs going on at the same time potentially with some disconnection thrown in.
+		// * First we route a payment, then get a temporary monitor update failure when trying to
+		//   route a second payment. We then claim the first payment.
+		// * If disconnect_count is set, we will disconnect at this point (which is likely as
+		//   TemporaryFailure likely indicates net disconnect which resulted in failing to update
+		//   the ChannelMonitor on a watchtower).
+		// * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
+		//   immediately, otherwise we wait until after the disconnect and deliver them via the
+		//   reconnect channel_reestablish processing (i.e. disconnect_count & 16 makes no sense if
+		//   disconnect_count & !disconnect_flags is 0).
+		// * We then update the channel monitor, reconnecting if disconnect_count is set, and walk
+		//   through message sending, potentially disconnecting/reconnecting multiple times based on
+		//   disconnect_count, to get the update_fulfill_htlc through.
+ // * We then walk through more message exchanges to get the original update_add_htlc + // through, swapping message ordering based on disconnect_count & 8 and optionally + // disconnect/reconnecting based on disconnect_count. + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + + // Now try to send a second payment which will fail to send + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_events(); + assert!(events_1.is_empty()); + assert_eq!(nodes[0].node.list_channels().len(), 1); + + // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1] + // but nodes[0] won't respond since it is frozen. + assert!(nodes[1].node.claim_funds(payment_preimage_1)); + check_added_monitors!(nodes[1], 1); + let events_2 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] { + Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + + if (disconnect_count & 16) == 0 { + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap(); + let events_3 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, payment_preimage_1); + }, + _ => panic!("Unexpected event"), + } + + if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) { + assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); + } else { panic!(); } + } + + (update_fulfill_htlcs[0].clone(), commitment_signed.clone()) + }, + _ => panic!("Unexpected event"), + }; + + if disconnect_count & !disconnect_flags > 0 { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + } + + // Now fix monitor updating... + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + macro_rules! 
disconnect_reconnect_peers { () => { { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + assert_eq!(reestablish_2.len(), 1); + + let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + (reestablish_1, reestablish_2, as_resp, bs_resp) + } } } + + let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 { + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert!(events_4.is_empty()); + + let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + assert_eq!(reestablish_2.len(), 1); + + let mut as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + check_added_monitors!(nodes[0], 0); + let mut bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + check_added_monitors!(nodes[1], 0); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + assert!(bs_resp.1.is_none()); + if (disconnect_count & 16) == 0 { + assert!(bs_resp.2.is_none()); + + assert!(as_resp.1.is_some()); + assert!(as_resp.2.is_some()); + assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst); + } else { + assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty()); + assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); + assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); + assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none()); + assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]); + assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed); + + assert!(as_resp.1.is_none()); + + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap(); + let events_3 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, payment_preimage_1); + }, + _ => panic!("Unexpected event"), + } + + let (as_resp_raa, as_resp_cu) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap(); + assert!(as_resp_cu.is_none()); + check_added_monitors!(nodes[0], 1); + + as_resp.1 = Some(as_resp_raa); + bs_resp.2 = None; + } + + if disconnect_count & !disconnect_flags > 1 { + let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!(); + + if (disconnect_count & 16) == 0 { + assert!(reestablish_1 == second_reestablish_1); + assert!(reestablish_2 == second_reestablish_2); + } + assert!(as_resp == second_as_resp); + assert!(bs_resp == second_bs_resp); + } + + (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), 
as_resp.2.unwrap()), as_resp.1.unwrap()) + } else { + let mut events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 2); + (SendEvent::from_event(events_4.remove(0)), match events_4[0] { + Event::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + msg.clone() + }, + _ => panic!("Unexpected event"), + }) + }; + + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting an RAA from nodes[0] still + check_added_monitors!(nodes[1], 1); + + if disconnect_count & !disconnect_flags > 2 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.unwrap() == initial_revoke_and_ack); + assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + + assert!(as_resp.2.is_none()); + assert!(bs_resp.2.is_none()); + } + + let as_commitment_update; + let bs_second_commitment_update; + + macro_rules! handle_bs_raa { () => { + as_commitment_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap(); + assert!(as_commitment_update.update_add_htlcs.is_empty()); + assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(as_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[0], 1); + } } + + macro_rules! 
handle_initial_raa { () => { + bs_second_commitment_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap().unwrap(); + assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + } } + + if (disconnect_count & 8) == 0 { + handle_bs_raa!(); + + if disconnect_count & !disconnect_flags > 3 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.unwrap() == initial_revoke_and_ack); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.is_none()); + + assert!(as_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); + } + + handle_initial_raa!(); + + if disconnect_count & !disconnect_flags > 4 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + } + } else { + handle_initial_raa!(); + + if disconnect_count & !disconnect_flags > 3 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + + assert!(as_resp.2.is_none()); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + + assert!(bs_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); + } + + handle_bs_raa!(); + + if disconnect_count & !disconnect_flags > 4 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + } + } + + let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap(); + assert!(as_commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap(); + assert!(bs_third_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + expect_pending_htlcs_forwardable!(nodes[1]); + + let events_5 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(payment_hash_2, *payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + } + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); + } + + #[test] + fn test_monitor_temporary_update_fail_a() { + do_test_monitor_temporary_update_fail(0); + do_test_monitor_temporary_update_fail(1); + do_test_monitor_temporary_update_fail(2); + 
do_test_monitor_temporary_update_fail(3);
+		do_test_monitor_temporary_update_fail(4);
+		do_test_monitor_temporary_update_fail(5);
+	}
+
+	#[test]
+	fn test_monitor_temporary_update_fail_b() {
+		do_test_monitor_temporary_update_fail(2 | 8);
+		do_test_monitor_temporary_update_fail(3 | 8);
+		do_test_monitor_temporary_update_fail(4 | 8);
+		do_test_monitor_temporary_update_fail(5 | 8);
+	}
+
+	#[test]
+	fn test_monitor_temporary_update_fail_c() {
+		do_test_monitor_temporary_update_fail(1 | 16);
+		do_test_monitor_temporary_update_fail(2 | 16);
+		do_test_monitor_temporary_update_fail(3 | 16);
+		do_test_monitor_temporary_update_fail(2 | 8 | 16);
+		do_test_monitor_temporary_update_fail(3 | 8 | 16);
+	}
+
 	#[test]
 	fn test_invalid_channel_announcement() {
 		//Test BOLT 7 channel_announcement msg requirement for final node, gather data to build custom channel_announcement msgs
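For readers following test_monitor_temporary_update_fail_{a,b,c} above: disconnect_count packs the whole scenario into one integer. A small decoder makes the encoding explicit (struct and field names here are illustrative, not part of the tests):

	struct MonitorFailScenario {
		/// Low bits: how many disconnect/reconnect rounds happen mid-flow.
		reconnect_cycles: usize,
		/// Bit 8: the initial RAA is delivered before nodes[1]'s RAA when unwinding.
		initial_raa_first: bool,
		/// Bit 16: the update_fulfill_htlc arrives via channel_reestablish rather
		/// than being delivered immediately.
		fulfill_via_reestablish: bool,
	}

	fn decode(disconnect_count: usize) -> MonitorFailScenario {
		let disconnect_flags = 8 | 16;
		MonitorFailScenario {
			reconnect_cycles: disconnect_count & !disconnect_flags,
			initial_raa_first: (disconnect_count & 8) != 0,
			fulfill_via_reestablish: (disconnect_count & 16) != 0,
		}
	}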