X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=9b5e2aa7df10ea4cccbcf77ed9433cb953d8f211;hb=1479b3875938974986eba15391fa2ca33bfdf1b2;hp=01faec671c309c5a870ce732bb2bbf2d2316a23d;hpb=92ff499bdbc115fbb897e5aa8bf758fc66987e95;p=rust-lightning diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs index 01faec67..9b5e2aa7 100644 --- a/src/ln/channelmanager.rs +++ b/src/ln/channelmanager.rs @@ -1,7 +1,9 @@ //! The top-level channel management and payment tracking stuff lives here. +//! //! The ChannelManager is the main chunk of logic implementing the lightning protocol and is //! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those //! upon reconnect to the relevant peer(s). +//! //! It does not manage routing logic (see ln::router for that) nor does it manage constructing //! on-chain transactions (it only monitors the chain to watch for any force-closes that might //! imply it needs to fail HTLCs/payments/channels it manages). @@ -20,7 +22,7 @@ use secp256k1; use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator}; use chain::transaction::OutPoint; -use ln::channel::{Channel, ChannelKeys}; +use ln::channel::{Channel, ChannelError, ChannelKeys}; use ln::channelmonitor::ManyChannelMonitor; use ln::router::{Route,RouteHop}; use ln::msgs; @@ -171,6 +173,48 @@ impl MsgHandleErrInternal { fn from_no_close(err: msgs::HandleError) -> Self { Self { err, needs_channel_force_close: false } } + #[inline] + fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { + Self { + err: match err { + ChannelError::Ignore(msg) => HandleError { + err: msg, + action: Some(msgs::ErrorAction::IgnoreError), + }, + ChannelError::Close(msg) => HandleError { + err: msg, + action: Some(msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { + channel_id, + data: msg.to_string() + }, + }), + }, + }, + needs_channel_force_close: false, + } + } + #[inline] + fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self { + Self { + err: match err { + ChannelError::Ignore(msg) => HandleError { + err: msg, + action: Some(msgs::ErrorAction::IgnoreError), + }, + ChannelError::Close(msg) => HandleError { + err: msg, + action: Some(msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { + channel_id, + data: msg.to_string() + }, + }), + }, + }, + needs_channel_force_close: true, + } + } } /// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This @@ -223,6 +267,7 @@ const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assum /// Manager which keeps track of a number of channels and sends messages to the appropriate /// channel, also tracking HTLC preimages and forwarding onion packets appropriately. +/// /// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through /// to individual Channels. pub struct ChannelManager { @@ -285,10 +330,14 @@ pub struct ChannelDetails { } impl ChannelManager { - /// Constructs a new ChannelManager to hold several channels and route between them. This is - /// the main "logic hub" for all channel-related actions, and implements ChannelMessageHandler. + /// Constructs a new ChannelManager to hold several channels and route between them. + /// + /// This is the main "logic hub" for all channel-related actions, and implements + /// ChannelMessageHandler. 
+ /// /// fee_proportional_millionths is an optional fee to charge any payments routed through us. /// Non-proportional fees are fixed according to our risk using the provided fee estimator. + /// /// Panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`! pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>) -> Result<Arc<ChannelManager>, secp256k1::Error> { let secp_ctx = Secp256k1::new(); @@ -324,12 +373,15 @@ impl ChannelManager { } /// Creates a new outbound channel to the given remote node and with the given value. + /// /// user_id will be provided back as user_channel_id in FundingGenerationReady and /// FundingBroadcastSafe events to allow tracking of which events correspond with which /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you /// may wish to avoid using 0 for user_id here. + /// /// If successful, will generate a SendOpenChannel event, so you should probably poll /// PeerManager::process_events afterwards. + /// /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or when push_msat is greater than channel_value_satoshis * 1k pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> { let chan_keys = if cfg!(feature = "fuzztarget") { @@ -355,9 +407,15 @@ impl ChannelManager { let channel = Channel::new_outbound(&*self.fee_estimator, chan_keys, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?; let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator); let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.insert(channel.channel_id(), channel) { - Some(_) => panic!("RNG is bad???"), - None => {} + match channel_state.by_id.entry(channel.channel_id()) { + hash_map::Entry::Occupied(_) => { + if cfg!(feature = "fuzztarget") { + return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" }); + } else { + panic!("RNG is bad???"); + } + }, + hash_map::Entry::Vacant(entry) => { entry.insert(channel); } } let mut events = self.pending_events.lock().unwrap(); @@ -407,8 +465,9 @@ impl ChannelManager { /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs /// will be accepted on the given channel, and after additional timeout/the closing of all /// pending HTLCs, the channel will be closed on chain. + /// /// May generate a SendShutdown event on success, which should be relayed. - pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), HandleError> { + pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { let (mut res, node_id, chan_option) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); @@ -422,7 +481,7 @@ impl ChannelManager { (res, chan_entry.get().get_their_node_id(), Some(chan_entry.remove_entry().1)) } else { (res, chan_entry.get().get_their_node_id(), None) } }, - hash_map::Entry::Vacant(_) => return Err(HandleError{err: "No such channel", action: None}) + hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"}) } }; for htlc_source in res.1.drain(..) { @@ -947,16 +1006,19 @@ impl ChannelManager { } /// Sends a payment along a given route.
+ /// /// Value parameters are provided via the last hop in route, see documentation for RouteHop /// fields for more info. + /// /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative /// payment), we don't do anything to stop you! We always try to ensure that if the provided /// next hop knows the preimage to payment_hash they can claim an additional amount as /// specified in the last hop in the route! Thus, you should probably do your own /// payment_preimage tracking (which you should already be doing as they represent "proof of /// payment") and prevent double-sends yourself. - /// See-also docs on Channel::send_htlc_and_commit. + /// /// May generate a SendHTLCs event on success, which should be relayed. + /// /// Raises APIError::RouteError when an invalid route or forward parameter /// (cltv_delta, fee, node public key) is specified pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> { @@ -1026,6 +1088,7 @@ update_fulfill_htlcs: Vec::new(), update_fail_htlcs: Vec::new(), update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed, }, }); @@ -1033,7 +1096,9 @@ } /// Call this upon creation of a funding transaction for the given channel. + /// /// Panics if a funding transaction has already been provided for this channel. + /// /// May panic if the funding_txo is duplicative with some other channel (note that this should /// be trivially prevented by using unique funding transaction keys per-channel). pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) { @@ -1107,6 +1172,7 @@ } /// Processes HTLCs which are pending waiting on random forward delay. + /// /// Should only really ever be called in response to a PendingHTLCsForwardable event. /// Will likely generate further events. pub fn process_pending_htlc_forwards(&self) { @@ -1189,6 +1255,7 @@ update_fulfill_htlcs: Vec::new(), update_fail_htlcs: Vec::new(), update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed: commitment_msg, }, })); @@ -1310,6 +1377,7 @@ update_fulfill_htlcs: Vec::new(), update_fail_htlcs: vec![msg], update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed: commitment_msg, }, }); @@ -1323,6 +1391,7 @@ /// Provides a payment preimage in response to a PaymentReceived event, returning true and /// generating message events for the net layer to claim the payment, if possible. Thus, you /// should probably kick the net layer to go send messages if this returns true! + /// /// May panic if called except in response to a PaymentReceived event.
pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool { let mut sha = Sha256::new(); @@ -1390,6 +1459,7 @@ impl ChannelManager { update_fulfill_htlcs: vec![msg], update_fail_htlcs: Vec::new(), update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed: commitment_msg, } }); @@ -1439,7 +1509,8 @@ impl ChannelManager { } }; - let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger)).map_err(|e| MsgHandleErrInternal::from_no_close(e))?; + let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger)) + .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?; let accept_msg = channel.get_accept_channel(); channel_state.by_id.insert(channel.channel_id(), channel); Ok(accept_msg) @@ -1454,7 +1525,8 @@ impl ChannelManager { //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } - chan.accept_channel(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + chan.accept_channel(&msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?; (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id()) }, //TODO: same as above @@ -1544,7 +1616,8 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.funding_locked(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + chan.funding_locked(&msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; return Ok(self.get_announcement_sigs(chan)); }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) @@ -1664,7 +1737,8 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fulfill_htlc(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?.clone() + chan.update_fulfill_htlc(&msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone() }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) }; @@ -1680,7 +1754,8 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) + chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) }?; @@ -1752,7 +1827,11 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from 
the wrong node!", msg.channel_id)); } - chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + if (msg.failure_code & 0x8000) != 0 { + return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION set", msg.channel_id)); + } + chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; Ok(()) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) @@ -1840,7 +1919,7 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) + chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } @@ -1860,7 +1939,7 @@ impl ChannelManager { let our_node_id = self.get_our_node_id(); let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()) - .map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; let were_node_one = announcement.node_id_1 == our_node_id; let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap(); @@ -1894,7 +1973,8 @@ impl ChannelManager { if chan.get_their_node_id() != *their_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; (Ok((funding_locked, revoke_and_ack, commitment_update)), channel_monitor) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) @@ -1907,6 +1987,44 @@ impl ChannelManager { } res } + + /// Begin Update fee process. Allowed only on an outbound channel. + /// If successful, will generate a UpdateHTLCs event, so you should probably poll + /// PeerManager::process_events afterwards. + /// Note: This API is likely to change! 
+ #[doc(hidden)] + pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> { + let mut channel_state = self.channel_state.lock().unwrap(); + match channel_state.by_id.get_mut(&channel_id) { + None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}), + Some(chan) => { + if !chan.is_usable() { + return Err(APIError::APIMisuseError{err: "Channel is not in usable state"}); + } + if !chan.is_outbound() { + return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); + } + if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::UpdateHTLCs { + node_id: chan.get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + } + }, + } + Ok(()) + } } impl events::EventsProvider for ChannelManager { @@ -2260,7 +2378,7 @@ mod tests { use rand::{thread_rng,Rng}; use std::cell::RefCell; - use std::collections::HashMap; + use std::collections::{BTreeSet, HashMap}; use std::default::Default; use std::rc::Rc; use std::sync::{Arc, Mutex}; @@ -2438,14 +2556,26 @@ mod tests { } impl Drop for Node { fn drop(&mut self) { - // Check that we processed all pending events - assert_eq!(self.node.get_and_clear_pending_events().len(), 0); - assert_eq!(self.chan_monitor.added_monitors.lock().unwrap().len(), 0); + if !::std::thread::panicking() { + // Check that we processed all pending events + assert_eq!(self.node.get_and_clear_pending_events().len(), 0); + assert_eq!(self.chan_monitor.added_monitors.lock().unwrap().len(), 0); + } } } fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { - node_a.node.create_channel(node_b.node.get_our_node_id(), 100000, 10001, 42).unwrap(); + create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001) + } + + fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { + let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat); + let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked); + (announcement, as_update, bs_update, channel_id, tx) + } + + fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) { + node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap(); let events_1 = node_a.node.get_and_clear_pending_events(); assert_eq!(events_1.len(), 1); @@ -2467,7 +2597,7 @@ mod tests { assert_eq!(events_2.len(), 1); match events_2[0] { Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => { - assert_eq!(*channel_value_satoshis, 100000); +
assert_eq!(*channel_value_satoshis, channel_value); assert_eq!(user_channel_id, 42); tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut { @@ -2517,47 +2647,53 @@ mod tests { _ => panic!("Unexpected event"), }; - confirm_transaction(&node_a.chain_monitor, &tx, chan_id); - let events_5 = node_a.node.get_and_clear_pending_events(); + confirm_transaction(&node_b.chain_monitor, &tx, chan_id); + let events_5 = node_b.node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); match events_5[0] { Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => { - assert_eq!(*node_id, node_b.node.get_our_node_id()); + assert_eq!(*node_id, node_a.node.get_our_node_id()); assert!(announcement_sigs.is_none()); - node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), msg).unwrap() + node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap() }, _ => panic!("Unexpected event"), }; let channel_id; - confirm_transaction(&node_b.chain_monitor, &tx, chan_id); - let events_6 = node_b.node.get_and_clear_pending_events(); + confirm_transaction(&node_a.chain_monitor, &tx, chan_id); + let events_6 = node_a.node.get_and_clear_pending_events(); assert_eq!(events_6.len(), 1); - let as_announcement_sigs = match events_6[0] { + (match events_6[0] { Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => { - assert_eq!(*node_id, node_a.node.get_our_node_id()); channel_id = msg.channel_id.clone(); - let as_announcement_sigs = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap().unwrap(); - node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &(*announcement_sigs).clone().unwrap()).unwrap(); - as_announcement_sigs + assert_eq!(*node_id, node_b.node.get_our_node_id()); + (msg.clone(), announcement_sigs.clone().unwrap()) }, _ => panic!("Unexpected event"), + }, channel_id, tx) + } + + fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) { + let bs_announcement_sigs = { + let bs_announcement_sigs = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap().unwrap(); + node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap(); + bs_announcement_sigs }; - let events_7 = node_a.node.get_and_clear_pending_events(); + let events_7 = node_b.node.get_and_clear_pending_events(); assert_eq!(events_7.len(), 1); - let (announcement, as_update) = match events_7[0] { + let (announcement, bs_update) = match events_7[0] { Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { (msg, update_msg) }, _ => panic!("Unexpected event"), }; - node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_announcement_sigs).unwrap(); - let events_8 = node_b.node.get_and_clear_pending_events(); + node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap(); + let events_8 = node_a.node.get_and_clear_pending_events(); assert_eq!(events_8.len(), 1); - let bs_update = match events_8[0] { + let as_update = match events_8[0] { Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { assert!(*announcement == *msg); update_msg @@ -2567,11 +2703,15 @@ mod tests { *node_a.network_chan_count.borrow_mut() += 1; - ((*announcement).clone(), (*as_update).clone(), 
(*bs_update).clone(), channel_id, tx) + ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone()) } fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { - let chan_announcement = create_chan_between_nodes(&nodes[a], &nodes[b]); + create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001) + } + + fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { + let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat); for node in nodes { assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap()); node.router.handle_channel_update(&chan_announcement.1).unwrap(); @@ -2580,6 +2720,17 @@ mod tests { (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4) } + macro_rules! check_spends { + ($tx: expr, $spends_tx: expr) => { + { + let mut funding_tx_map = HashMap::new(); + let spends_tx = $spends_tx; + funding_tx_map.insert(spends_tx.txid(), spends_tx); + $tx.verify(&funding_tx_map).unwrap(); + } + } + } + fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) { let (node_a, broadcaster_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster) } else { (&outbound_node.node, &outbound_node.tx_broadcaster) }; let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) }; @@ -2623,9 +2774,7 @@ mod tests { tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); } assert_eq!(tx_a, tx_b); - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(funding_tx.txid(), funding_tx); - tx_a.verify(&funding_tx_map).unwrap(); + check_spends!(tx_a, funding_tx); let events_2 = node_a.get_and_clear_pending_events(); assert_eq!(events_2.len(), 1); @@ -2656,10 +2805,11 @@ mod tests { impl SendEvent { fn from_event(event: Event) -> SendEvent { match event { - Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, commitment_signed } } => { + Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); SendEvent { node_id: node_id, msgs: update_add_htlcs, commitment_msg: commitment_signed } }, _ => panic!("Unexpected event type!"), @@ -2667,36 +2817,28 @@ mod tests { } } + macro_rules! check_added_monitors { + ($node: expr, $count: expr) => { + { + let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), $count); + added_monitors.clear(); + } + } + } + macro_rules!
commitment_signed_dance { ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => { { - { - let added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); - assert!(added_monitors.is_empty()); - } + check_added_monitors!($node_a, 0); let (as_revoke_and_ack, as_commitment_signed) = $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); - { - let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } - { - let added_monitors = $node_b.chan_monitor.added_monitors.lock().unwrap(); - assert!(added_monitors.is_empty()); - } + check_added_monitors!($node_a, 1); + check_added_monitors!($node_b, 0); assert!($node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); - { - let mut added_monitors = $node_b.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!($node_b, 1); let (bs_revoke_and_ack, bs_none) = $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); assert!(bs_none.is_none()); - { - let mut added_monitors = $node_b.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!($node_b, 1); if $fail_backwards { assert!($node_a.node.get_and_clear_pending_events().is_empty()); } @@ -2715,24 +2857,26 @@ mod tests { } } + macro_rules! get_payment_preimage_hash { + ($node: expr) => { + { + let payment_preimage = [*$node.network_payment_count.borrow(); 32]; + *$node.network_payment_count.borrow_mut() += 1; + let mut payment_hash = [0; 32]; + let mut sha = Sha256::new(); + sha.input(&payment_preimage[..]); + sha.result(&mut payment_hash); + (payment_preimage, payment_hash) + } + } + } + fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) { - let our_payment_preimage = [*origin_node.network_payment_count.borrow(); 32]; - *origin_node.network_payment_count.borrow_mut() += 1; - let our_payment_hash = { - let mut sha = Sha256::new(); - sha.input(&our_payment_preimage[..]); - let mut ret = [0; 32]; - sha.result(&mut ret); - ret - }; + let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node); let mut payment_event = { origin_node.node.send_payment(route, our_payment_hash).unwrap(); - { - let mut added_monitors = origin_node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(origin_node, 1); let mut events = origin_node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -2744,11 +2888,7 @@ mod tests { assert_eq!(node.node.get_our_node_id(), payment_event.node_id); node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - { - let added_monitors = node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 0); - } - + check_added_monitors!(node, 0); commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); let events_1 = node.node.get_and_clear_pending_events(); @@ -2772,11 +2912,7 @@ mod tests { _ => panic!("Unexpected event"), } } else { - { - let mut added_monitors = node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - 
} + check_added_monitors!(node, 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); } @@ -2789,25 +2925,17 @@ mod tests { fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: [u8; 32]) { assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage)); - { - let mut added_monitors = expected_route.last().unwrap().chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(expected_route.last().unwrap(), 1); let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None; macro_rules! update_fulfill_dance { ($node: expr, $prev_node: expr, $last_node: expr) => { { $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap(); - { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); - if $last_node { - assert_eq!(added_monitors.len(), 0); - } else { - assert_eq!(added_monitors.len(), 1); - } - added_monitors.clear(); + if $last_node { + check_added_monitors!($node, 0); + } else { + check_added_monitors!($node, 1); } commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false); } @@ -2826,11 +2954,12 @@ mod tests { if !skip_last || idx != expected_route.len() - 1 { assert_eq!(events.len(), 1); match events[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => { + Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); expected_next_node = node_id.clone(); next_msgs = Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone())); }, @@ -2882,15 +3011,7 @@ mod tests { assert_eq!(hop.pubkey, node.node.get_our_node_id()); } - let our_payment_preimage = [*origin_node.network_payment_count.borrow(); 32]; - *origin_node.network_payment_count.borrow_mut() += 1; - let our_payment_hash = { - let mut sha = Sha256::new(); - sha.input(&our_payment_preimage[..]); - let mut ret = [0; 32]; - sha.result(&mut ret); - ret - }; + let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node); let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap(); match err { @@ -2906,11 +3027,7 @@ mod tests { fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) { assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash)); - { - let mut added_monitors = expected_route.last().unwrap().chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(expected_route.last().unwrap(), 1); let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None; macro_rules! 
update_fail_dance { @@ -2937,11 +3054,12 @@ mod tests { if !skip_last || idx != expected_route.len() - 1 { assert_eq!(events.len(), 1); match events[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => { + Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); expected_next_node = node_id.clone(); next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone())); }, @@ -3005,6 +3123,528 @@ mod tests { nodes } + #[test] + fn test_async_inbound_update_fee() { + let mut nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + // A B + // update_fee -> + // send (1) commitment_signed -. + // <- update_add_htlc/commitment_signed + // send (2) RAA (awaiting remote revoke) -. + // (1) commitment_signed is delivered -> + // .- send (3) RAA (awaiting remote revoke) + // (2) RAA is delivered -> + // .- send (4) commitment_signed + // <- (3) RAA is delivered + // send (5) commitment_signed -. + // <- (4) commitment_signed is delivered + // send (6) RAA -. + // (5) commitment_signed is delivered -> + // <- RAA + // (6) RAA is delivered -> + + // First nodes[0] generates an update_fee + nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { // (1) + Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap(); + check_added_monitors!(nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack + check_added_monitors!(nodes[0], 1); + + // deliver(1), generate (3): + let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting nodes[0] revoke_and_ack + check_added_monitors!(nodes[1], 1); + + let bs_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2) + assert!(bs_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fee.is_none()); // (4) + check_added_monitors!(nodes[1], 1); + + let as_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); // deliver (3) + assert!(as_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fee.is_none()); // (5) + check_added_monitors!(nodes[0], 1); + + let (as_second_revoke, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.unwrap().commitment_signed).unwrap(); // deliver (4) + assert!(as_second_commitment_signed.is_none()); // only (6) + check_added_monitors!(nodes[0], 1); + + let (bs_second_revoke, bs_second_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.unwrap().commitment_signed).unwrap(); // deliver (5) + assert!(bs_second_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + let events_2 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment + _ => panic!("Unexpected event"), + } + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), 
&as_second_revoke).unwrap().is_none()); // deliver (6) + check_added_monitors!(nodes[1], 1); + } + + #[test] + fn test_update_fee_unordered_raa() { + // Just the intro to the previous test followed by an out-of-order RAA (which caused a + // crash in an earlier version of the update_fee patch) + let mut nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + // First nodes[0] generates an update_fee + nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let update_msg = match events_0[0] { // (1) + Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { + update_fee.as_ref() + }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... + let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap(); + check_added_monitors!(nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack + check_added_monitors!(nodes[0], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none()); // deliver (2) + check_added_monitors!(nodes[1], 1); + + // We can't continue, sadly, because our (1) now has a bogus signature + } + + #[test] + fn test_multi_flight_update_fee() { + let nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // A B + // update_fee/commitment_signed -> + // .- send (1) RAA and (2) commitment_signed + // update_fee (never committed) -> + // (3) update_fee -> + // We have to manually generate the above update_fee, it is allowed by the protocol but we + // don't track which updates correspond to which revoke_and_ack responses so we're in + // AwaitingRAA mode and will not generate the update_fee yet. + // <- (1) RAA delivered + // (3) is generated and send (4) CS -. 
+ // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't + // know the per_commitment_point to use for it. + // <- (2) commitment_signed delivered + // revoke_and_ack -> + // B should send no response here + // (4) commitment_signed delivered -> + // <- RAA/commitment_signed delivered + // revoke_and_ack -> + + // First nodes[0] generates an update_fee + let initial_feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1) + Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + (update_fee.as_ref().unwrap(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + + // Deliver first update_fee/commitment_signed pair, generating (1) and (2): + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap(); + let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap(); + check_added_monitors!(nodes[1], 1); + + // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment + // transaction: + nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + + // Create the (3) update_fee message that nodes[0] will generate before it does... + let mut update_msg_2 = msgs::UpdateFee { + channel_id: update_msg_1.channel_id.clone(), + feerate_per_kw: (initial_feerate + 30) as u32, + }; + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap(); + + update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32; + // Deliver (3) + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap(); + + // Deliver (1), generating (3) and (4) + let as_second_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); + check_added_monitors!(nodes[0], 1); + assert!(as_second_update.as_ref().unwrap().update_add_htlcs.is_empty()); + assert!(as_second_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); + assert!(as_second_update.as_ref().unwrap().update_fail_htlcs.is_empty()); + assert!(as_second_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); + // Check that the update_fee newly generated matches what we delivered: + assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id); + assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw); + + // Deliver (2) commitment_signed + let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), bs_commitment_signed.as_ref().unwrap()).unwrap(); + check_added_monitors!(nodes[0], 1); + assert!(as_commitment_signed.is_none()); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + + // Deliver (4) + let (bs_second_revoke, bs_second_commitment) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.unwrap().commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); +
assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + let (as_second_revoke, as_second_commitment) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment.unwrap()).unwrap(); + assert!(as_second_commitment.is_none()); + check_added_monitors!(nodes[0], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + } + + #[test] + fn test_update_fee_vanilla() { + let nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + let feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let commitment_signed = commitment_signed.unwrap(); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[1], 1); + } + + #[test] + fn test_update_fee_with_fundee_update_add_htlc() { + let mut nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! 
get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + let feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + check_added_monitors!(nodes[0], 1); + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let commitment_signed = commitment_signed.unwrap(); + check_added_monitors!(nodes[1], 1); + + let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap(); + + let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[1]); + + // nothing happens since node[1] is in AwaitingRemoteRevoke + nodes[1].node.send_payment(route, our_payment_hash).unwrap(); + { + let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), 0); + added_monitors.clear(); + } + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 0); + // node[1] has nothing to do + + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + // AwaitingRemoteRevoke ends here + + let commitment_update = resp_option.unwrap(); + assert_eq!(commitment_update.update_add_htlcs.len(), 1); + assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); + assert_eq!(commitment_update.update_fail_htlcs.len(), 0); + assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0); + assert_eq!(commitment_update.update_fee.is_none(), true); + + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap(); + let (revoke, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + let commitment_signed = commitment_signed.unwrap(); + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(resp_option.is_none()); + + let (revoke, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(commitment_signed.is_none()); + let 
resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap(); + check_added_monitors!(nodes[0], 1); + assert!(resp_option.is_none()); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[0].node.process_pending_htlc_forwards(); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentReceived { .. } => { }, + _ => panic!("Unexpected event"), + }; + + claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage); + + send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); + send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); + close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + } + + #[test] + fn test_update_fee() { + let nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // A B + // (1) update_fee/commitment_signed -> + // <- (2) revoke_and_ack + // .- send (3) commitment_signed + // (4) update_fee/commitment_signed -> + // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke) + // <- (3) commitment_signed delivered + // send (6) revoke_and_ack -. + // <- (5) deliver revoke_and_ack + // (6) deliver revoke_and_ack -> + // .- send (7) commitment_signed in response to (4) + // <- (7) deliver commitment_signed + // revoke_and_ack -> + + // Create and deliver (1)... + let feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + // Generate (2) and (3): + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let commitment_signed_0 = commitment_signed.unwrap(); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + + // Deliver (2): + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + // Create and deliver (4)... 
+ nodes[0].node.update_fee(channel_id, feerate+30).unwrap(); + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + // ... creating (5) + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + + // Handle (3), creating (6): + let (revoke_msg_0, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + // Deliver (5): + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + // Deliver (6), creating (7): + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap(); + let commitment_signed = resp_option.unwrap().commitment_signed; + check_added_monitors!(nodes[1], 1); + + // Deliver (7) + let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[1], 1); + + assert_eq!(get_feerate!(nodes[0]), feerate + 30); + assert_eq!(get_feerate!(nodes[1]), feerate + 30); + close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + } + #[test] fn fake_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -3163,9 +3803,7 @@ mod tests { let mut res = Vec::with_capacity(2); node_txn.retain(|tx| { if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() { - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(chan.3.txid(), chan.3.clone()); - tx.verify(&funding_tx_map).unwrap(); + check_spends!(tx, chan.3.clone()); if commitment_tx.is_none() { res.push(tx.clone()); } @@ -3181,9 +3819,7 @@ mod tests { if has_htlc_tx != HTLCType::NONE { node_txn.retain(|tx| { if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() { - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(res[0].txid(), res[0].clone()); - tx.verify(&funding_tx_map).unwrap(); + check_spends!(tx, res[0].clone()); if has_htlc_tx == HTLCType::TIMEOUT { assert!(tx.lock_time != 0); } else { @@ -3207,9 +3843,7 @@ mod tests { assert_eq!(node_txn.len(), 1); node_txn.retain(|tx| { if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() { - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(revoked_tx.txid(), revoked_tx.clone()); - tx.verify(&funding_tx_map).unwrap(); + check_spends!(tx, revoked_tx.clone()); false } else { true } }); @@ -3225,10 
 		for tx in prev_txn {
 			if node_txn[0].input[0].previous_output.txid == tx.txid() {
-				let mut funding_tx_map = HashMap::new();
-				funding_tx_map.insert(tx.txid(), tx.clone());
-				node_txn[0].verify(&funding_tx_map).unwrap();
-
+				check_spends!(node_txn[0], tx.clone());
 				assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
 				assert_eq!(tx.input.len(), 1); // must spend a commitment tx
@@ -3268,6 +3899,281 @@ mod tests {
 		}
 	}
 
+	#[test]
+	fn channel_reserve_test() {
+		use util::rng;
+		use std::sync::atomic::Ordering;
+		use ln::msgs::HandleError;
+
+		macro_rules! get_channel_value_stat {
+			($node: expr, $channel_id: expr) => {{
+				let chan_lock = $node.node.channel_state.lock().unwrap();
+				let chan = chan_lock.by_id.get(&$channel_id).unwrap();
+				chan.get_value_stat()
+			}}
+		}
+
+		let mut nodes = create_network(3);
+		let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001);
+		let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001);
+
+		let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
+		let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
+
+		let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
+		let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
+
+		macro_rules! get_route_and_payment_hash {
+			($recv_value: expr) => {{
+				let route = nodes[0].router.get_route(&nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV).unwrap();
+				let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+				(route, payment_hash, payment_preimage)
+			}}
+		};
+
+		macro_rules! expect_pending_htlcs_forwardable {
+			($node: expr) => {{
+				let events = $node.node.get_and_clear_pending_events();
+				assert_eq!(events.len(), 1);
+				match events[0] {
+					Event::PendingHTLCsForwardable { .. } => { },
+					_ => panic!("Unexpected event"),
+				};
+				$node.node.channel_state.lock().unwrap().next_forward = Instant::now();
+				$node.node.process_pending_htlc_forwards();
+			}}
+		};
+
+		macro_rules! expect_forward {
+			($node: expr) => {{
+				let mut events = $node.node.get_and_clear_pending_events();
+				assert_eq!(events.len(), 1);
+				check_added_monitors!($node, 1);
+				let payment_event = SendEvent::from_event(events.remove(0));
+				payment_event
+			}}
+		}
+
+		macro_rules! expect_payment_received {
+			($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
+				let events = $node.node.get_and_clear_pending_events();
+				assert_eq!(events.len(), 1);
+				match events[0] {
+					Event::PaymentReceived { ref payment_hash, amt } => {
+						assert_eq!($expected_payment_hash, *payment_hash);
+						assert_eq!($expected_recv_value, amt);
+					},
+					_ => panic!("Unexpected event"),
+				}
+			}
+		};
+
+		let feemsat = 239; // per-hop fee; the route assertion below checks that every non-final hop charges exactly this
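+		// A rough sketch of the arithmetic relied on below (all names defined above):
+		// with nodes.len() == 3 there is exactly one intermediate hop, so sending
+		// `recv_value` msat to nodes[2] costs nodes[0]
+		//     recv_value + total_fee_msat == recv_value + 1 * feemsat
+		// out of its balance with nodes[1].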
+		let total_fee_msat = (nodes.len() - 2) as u64 * 239;
+
+		let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat;
+
+		// attempt to send amt_msat > their_max_htlc_value_in_flight_msat
+		{
+			let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
+			assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
+			let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
+			match err {
+				APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
+				_ => panic!("Unknown error variant"),
+			}
+		}
+
+		let mut htlc_id = 0;
+		// The channel reserve is bigger than their_max_htlc_value_in_flight_msat, so loop to
+		// deplete nodes[0]'s balance until the reserve is what stops us
+		loop {
+			let amt_msat = recv_value_0 + total_fee_msat;
+			if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat {
+				break;
+			}
+			send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
+			htlc_id += 1;
+
+			let (stat01_, stat11_, stat12_, stat22_) = (
+				get_channel_value_stat!(nodes[0], chan_1.2),
+				get_channel_value_stat!(nodes[1], chan_1.2),
+				get_channel_value_stat!(nodes[1], chan_2.2),
+				get_channel_value_stat!(nodes[2], chan_2.2),
+			);
+
+			assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
+			assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
+			assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
+			assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
+			stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
+		}
+
+		{
+			let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat;
+			// attempt to trigger a channel_reserve violation
+			let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
+			let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
+			match err {
+				APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+				_ => panic!("Unknown error variant"),
+			}
+		}
+
+		// adding a pending output
+		let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2;
+		let amt_msat_1 = recv_value_1 + total_fee_msat;
+
+		let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1);
+		let payment_event_1 = {
+			nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap();
+			check_added_monitors!(nodes[0], 1);
+
+			let mut events = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events.len(), 1);
+			SendEvent::from_event(events.remove(0))
+		};
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap();
+
+		// channel reserve test with htlc pending output > 0
+		let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat;
+		{
+			let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+			match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+				APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+				_ => panic!("Unknown error variant"),
+			}
+		}
+
+		{
+			// test the channel_reserve check on nodes[1]'s side
+			let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+
+			// Need to manually create the update_add_htlc message to get around the channel reserve check in send_htlc()
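+			// Sketch of the manual construction below: pick a random session key, derive
+			// the per-hop onion keys and payloads for the route, assemble the onion
+			// packet (the same steps send_payment() performs internally), and hand the
+			// resulting update_add_htlc directly to nodes[1].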
+			let secp_ctx = Secp256k1::new();
+			let session_priv = SecretKey::from_slice(&secp_ctx, &{
+				let mut session_key = [0; 32];
+				rng::fill_bytes(&mut session_key);
+				session_key
+			}).expect("RNG is bad!");
+
+			let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+			let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+			let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height).unwrap();
+			let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+			let msg = msgs::UpdateAddHTLC {
+				channel_id: chan_1.2,
+				htlc_id,
+				amount_msat: htlc_msat,
+				payment_hash: our_payment_hash,
+				cltv_expiry: htlc_cltv,
+				onion_routing_packet: onion_packet,
+			};
+
+			let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
+			match err {
+				HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
+			}
+		}
+
+		// split the rest to test the holding cell
+		let recv_value_21 = recv_value_2/2;
+		let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
+		{
+			let stat = get_channel_value_stat!(nodes[0], chan_1.2);
+			assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
+		}
+
+		// now see if they go through on both sides
+		let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
+		// but this one will get stuck in the holding cell
+		nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
+		check_added_monitors!(nodes[0], 0);
+		let events = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 0);
+
+		// test with outbound holding cell amount > 0
+		{
+			let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
+			match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+				APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+				_ => panic!("Unknown error variant"),
+			}
+		}
+
+		let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
+		// this one will also get stuck in the holding cell
+		nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
+		check_added_monitors!(nodes[0], 0);
+		let events = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 0);
+
+		// flush the pending htlc
+		let (as_revoke_and_ack, as_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
+		check_added_monitors!(nodes[1], 1);
+
+		let commitment_update_2 = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap().unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let (bs_revoke_and_ack, bs_none) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
+		assert!(bs_none.is_none());
+		check_added_monitors!(nodes[0], 1);
+		assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+		check_added_monitors!(nodes[1], 1);
+
+		expect_pending_htlcs_forwardable!(nodes[1]);
+
+		let ref payment_event_11 = expect_forward!(nodes[1]);
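+		// payment_event_11 carries the first pending HTLC (payment 1), which nodes[1]
+		// now forwards to nodes[2] over chan_2.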
+		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap();
+		commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
+
+		expect_pending_htlcs_forwardable!(nodes[2]);
+		expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1);
+
+		// flush the htlcs in the holding cell
+		assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap();
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap();
+		commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+
+		let ref payment_event_3 = expect_forward!(nodes[1]);
+		assert_eq!(payment_event_3.msgs.len(), 2);
+		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
+		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();
+
+		commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
+		expect_pending_htlcs_forwardable!(nodes[2]);
+
+		let events = nodes[2].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 2);
+		match events[0] {
+			Event::PaymentReceived { ref payment_hash, amt } => {
+				assert_eq!(our_payment_hash_21, *payment_hash);
+				assert_eq!(recv_value_21, amt);
+			},
+			_ => panic!("Unexpected event"),
+		}
+		match events[1] {
+			Event::PaymentReceived { ref payment_hash, amt } => {
+				assert_eq!(our_payment_hash_22, *payment_hash);
+				assert_eq!(recv_value_22, amt);
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
+		claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
+		claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
+
+		let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat);
+		let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
+		assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
+		assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat);
+
+		let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
+		assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
+	}
+
 	#[test]
 	fn channel_monitor_network_test() {
 		// Simple test which builds a network of ChannelManagers, connects them to each other, and
@@ -3317,11 +4223,7 @@ mod tests {
 			($node: expr, $prev_node: expr, $preimage: expr) => {
 				{
 					assert!($node.node.claim_funds($preimage));
-					{
-						let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
-						assert_eq!(added_monitors.len(), 1);
-						added_monitors.clear();
-					}
+					check_added_monitors!($node, 1);
 
 					let events = $node.node.get_and_clear_pending_events();
 					assert_eq!(events.len(), 1);
@@ -3396,6 +4298,13 @@ mod tests {
 		let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 		// Get the will-be-revoked local txn from nodes[0]
 		let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
+		assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
+		assert_eq!(revoked_local_txn[0].input.len(), 1);
+		assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
+		assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
+		assert_eq!(revoked_local_txn[1].input.len(), 1);
+		assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
+		assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
 		// Revoke the old state
 		claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
@@ -3406,11 +4315,9 @@ mod tests {
 			let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 			assert_eq!(node_txn.len(), 3);
 			assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
-			assert_eq!(node_txn[0].input.len(), 1);
+			assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
-			let mut funding_tx_map = HashMap::new();
-			funding_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone());
-			node_txn[0].verify(&funding_tx_map).unwrap();
+			check_spends!(node_txn[0], revoked_local_txn[0].clone());
 			node_txn.swap_remove(0);
 		}
 		test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
@@ -3426,6 +4333,173 @@ mod tests {
 		assert_eq!(nodes[1].node.list_channels().len(), 0);
 	}
 
+	#[test]
+	fn revoked_output_claim() {
+		// Simple test to ensure a node will claim a revoked output when a stale remote commitment
+		// transaction is broadcast by its counterparty
+		let nodes = create_network(2);
+		let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+		// nodes[0] is going to revoke an old state, so nodes[1] should be able to claim the revoked output
+		let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+		assert_eq!(revoked_local_txn.len(), 1);
+		// Only output is the full channel value back to nodes[0]:
+		assert_eq!(revoked_local_txn[0].output.len(), 1);
+		// Send a payment through, updating everyone's latest commitment txn
+		send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
+
+		// Inform nodes[1] that nodes[0] broadcast a stale tx
+		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 3); // nodes[1] will broadcast the justice tx twice, and its own local state once
+
+		assert_eq!(node_txn[0], node_txn[2]);
+
+		check_spends!(node_txn[0], revoked_local_txn[0].clone());
+		check_spends!(node_txn[1], chan_1.3.clone());
+
+		// Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
+		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+		get_announce_close_broadcast_events(&nodes, 0, 1);
+	}
+
+	#[test]
+	fn claim_htlc_outputs_shared_tx() {
+		// Node revoked an old state; the HTLCs haven't timed out yet, so claim them in a shared justice tx
+		let nodes = create_network(2);
+
+		// Create some new channel:
+		let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		// Rebalance the network to generate HTLCs in the two directions
+		send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+		// nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both the
+		// offered and received HTLC outputs on top of the commitment tx
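+		// For reference, the witness-script lengths asserted on below distinguish the
+		// three claimed output types: 77 bytes for the revoked to_local output, 133 for
+		// a revoked offered HTLC and 138 for a revoked received HTLC.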
+		let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+		let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
+
+		// Get the will-be-revoked local txn from nodes[0]
+		let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+		assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
+		assert_eq!(revoked_local_txn[0].input.len(), 1);
+		assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
+		assert_eq!(revoked_local_txn[1].input.len(), 1);
+		assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
+		assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
+		check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone());
+
+		// Revoke the old state
+		claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
+
+		{
+			let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+
+			nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+
+			nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+			let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+			assert_eq!(node_txn.len(), 4);
+
+			assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
+			check_spends!(node_txn[0], revoked_local_txn[0].clone());
+
+			assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning
+
+			let mut witness_lens = BTreeSet::new();
+			witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
+			witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
+			witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
+			assert_eq!(witness_lens.len(), 3);
+			assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
+			assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
+			assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
+
+			// Next nodes[1] broadcasts its current local tx state:
+			assert_eq!(node_txn[1].input.len(), 1);
+			assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spends the funding tx's unique txout; tx broadcast by ChannelManager
+
+			assert_eq!(node_txn[2].input.len(), 1);
+			let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap();
+			assert_eq!(witness_script.len(), 133); // Spends an offered htlc output
+			assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid());
+			assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
+			assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid);
+		}
+		get_announce_close_broadcast_events(&nodes, 0, 1);
+		assert_eq!(nodes[0].node.list_channels().len(), 0);
+		assert_eq!(nodes[1].node.list_channels().len(), 0);
+	}
+
+	#[test]
+	fn claim_htlc_outputs_single_tx() {
+		// Node revoked an old state; the HTLCs have timed out, so claim each of them in a separate justice tx
+		let nodes = create_network(2);
+
+		let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		// Rebalance the network to generate HTLCs in the two directions
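+		// As in the shared-tx test above, routing one payment in each direction leaves
+		// nodes[0]'s stale commitment with one offered and one received HTLC output for
+		// nodes[1] to claim, this time via separate claim transactions.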
+		send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+		// nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both the
+		// offered and received HTLC outputs on top of the commitment tx, but this time as two
+		// different claim transactions, as we're going to time the HTLCs out given the high
+		// current block height
+		let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+		let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
+
+		// Get the will-be-revoked local txn from nodes[0]
+		let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+
+		// Revoke the old state
+		claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
+
+		{
+			let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+
+			nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+
+			nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+			let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+			assert_eq!(node_txn.len(), 12); // ChannelManager: 2, ChannelMonitor: 8 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan)
+
+			assert_eq!(node_txn[0], node_txn[7]);
+			assert_eq!(node_txn[1], node_txn[8]);
+			assert_eq!(node_txn[2], node_txn[9]);
+			assert_eq!(node_txn[3], node_txn[10]);
+			assert_eq!(node_txn[4], node_txn[11]);
+			assert_eq!(node_txn[3], node_txn[5]); // local commitment tx + htlc timeout tx broadcast by ChannelManager
+			assert_eq!(node_txn[4], node_txn[6]);
+
+			assert_eq!(node_txn[0].input.len(), 1);
+			assert_eq!(node_txn[1].input.len(), 1);
+			assert_eq!(node_txn[2].input.len(), 1);
+
+			check_spends!(node_txn[0], revoked_local_txn[0].clone());
+			check_spends!(node_txn[1], revoked_local_txn[0].clone());
+			check_spends!(node_txn[2], revoked_local_txn[0].clone());
+
+			let mut witness_lens = BTreeSet::new();
+			witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
+			witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
+			witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
+			assert_eq!(witness_lens.len(), 3);
+			assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
+			assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
+			assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
+
+			assert_eq!(node_txn[3].input.len(), 1);
+			check_spends!(node_txn[3], chan_1.3.clone());
+
+			assert_eq!(node_txn[4].input.len(), 1);
+			let witness_script = node_txn[4].input[0].witness.last().unwrap();
+			assert_eq!(witness_script.len(), 133); // Spends an offered htlc output
+			assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
+			assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
+			assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
+		}
+		get_announce_close_broadcast_events(&nodes, 0, 1);
+		assert_eq!(nodes[0].node.list_channels().len(), 0);
+		assert_eq!(nodes[1].node.list_channels().len(), 0);
+	}
+
 	#[test]
 	fn test_htlc_ignore_latest_remote_commitment() {
 		// Test that HTLC transactions spending the latest remote commitment transaction are simply
@@ -3477,23 +4551,11 @@ mod tests {
 		let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();
-		let our_payment_preimage = [*nodes[0].network_payment_count.borrow(); 32];
-		*nodes[0].network_payment_count.borrow_mut() += 1;
-		let our_payment_hash = {
-			let mut sha = Sha256::new();
-			sha.input(&our_payment_preimage[..]);
-			let mut ret = [0; 32];
-			sha.result(&mut ret);
-			ret
-		};
+		let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 
 		let mut payment_event = {
 			nodes[0].node.send_payment(route, our_payment_hash).unwrap();
-			{
-				let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
-				assert_eq!(added_monitors.len(), 1);
-				added_monitors.clear();
-			}
+			check_added_monitors!(nodes[0], 1);
 
 			let mut events = nodes[0].node.get_and_clear_pending_events();
 			assert_eq!(events.len(), 1);
@@ -3518,20 +4580,10 @@ mod tests {
 		payment_event = SendEvent::from_event(events_2.remove(0));
 		assert_eq!(payment_event.msgs.len(), 1);
 
-		{
-			let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
-			assert_eq!(added_monitors.len(), 1);
-			added_monitors.clear();
-		}
-
+		check_added_monitors!(nodes[1], 1);
 		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
-
-		{
-			let mut added_monitors = nodes[2].chan_monitor.added_monitors.lock().unwrap();
-			assert_eq!(added_monitors.len(), 1);
-			added_monitors.clear();
-		}
+		check_added_monitors!(nodes[2], 1);
 
 		// nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
 		// state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
@@ -3582,9 +4634,8 @@ mod tests {
 		assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
 		assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
 		assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
-		let mut funding_tx_map = HashMap::new();
-		funding_tx_map.insert(tx.txid(), tx);
-		node_txn[0].verify(&funding_tx_map).unwrap();
+
+		check_spends!(node_txn[0], tx);
 	}
 
 	#[test]
@@ -3631,28 +4682,20 @@ mod tests {
 		for msg in reestablish_1 {
 			resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap());
 		}
-		{
-			let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
-			if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 {
-				assert_eq!(added_monitors.len(), 1);
-			} else {
-				assert!(added_monitors.is_empty());
-			}
-			added_monitors.clear();
+		if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 {
+			check_added_monitors!(node_b, 1);
+		} else {
+			check_added_monitors!(node_b, 0);
 		}
 
 		let mut resp_2 = Vec::new();
 		for msg in reestablish_2 {
 			resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap());
 		}
-		{
-			let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
-			if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 {
-				assert_eq!(added_monitors.len(), 1);
-			} else {
-				assert!(added_monitors.is_empty());
-			}
-			added_monitors.clear();
+		if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 {
+			check_added_monitors!(node_a, 1);
+		} else {
+			check_added_monitors!(node_a, 0);
 		}
 
 		// We don't yet support both needing updates, as that would require a different commitment dance: