X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=42dcc6c77016fabac9e39ef8bad09a6c490194fb;hb=3f2d379835e5bb73c44c536786e228ff07dff9a0;hp=1b5323c816cf1bc67b8afbf6b57068a0cd392f76;hpb=a805567683ea3a495122915671fef125fb45264f;p=rust-lightning diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs index 1b5323c8..42dcc6c7 100644 --- a/src/ln/channelmanager.rs +++ b/src/ln/channelmanager.rs @@ -22,7 +22,7 @@ use secp256k1; use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator}; use chain::transaction::OutPoint; -use ln::channel::{Channel, ChannelKeys}; +use ln::channel::{Channel, ChannelError, ChannelKeys}; use ln::channelmonitor::ManyChannelMonitor; use ln::router::{Route,RouteHop}; use ln::msgs; @@ -173,6 +173,48 @@ impl MsgHandleErrInternal { fn from_no_close(err: msgs::HandleError) -> Self { Self { err, needs_channel_force_close: false } } + #[inline] + fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { + Self { + err: match err { + ChannelError::Ignore(msg) => HandleError { + err: msg, + action: Some(msgs::ErrorAction::IgnoreError), + }, + ChannelError::Close(msg) => HandleError { + err: msg, + action: Some(msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { + channel_id, + data: msg.to_string() + }, + }), + }, + }, + needs_channel_force_close: false, + } + } + #[inline] + fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self { + Self { + err: match err { + ChannelError::Ignore(msg) => HandleError { + err: msg, + action: Some(msgs::ErrorAction::IgnoreError), + }, + ChannelError::Close(msg) => HandleError { + err: msg, + action: Some(msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { + channel_id, + data: msg.to_string() + }, + }), + }, + }, + needs_channel_force_close: true, + } + } } /// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). 
This @@ -1046,6 +1088,7 @@ impl ChannelManager { update_fulfill_htlcs: Vec::new(), update_fail_htlcs: Vec::new(), update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed, }, }); @@ -1212,6 +1255,7 @@ impl ChannelManager { update_fulfill_htlcs: Vec::new(), update_fail_htlcs: Vec::new(), update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed: commitment_msg, }, })); @@ -1333,6 +1377,7 @@ impl ChannelManager { update_fulfill_htlcs: Vec::new(), update_fail_htlcs: vec![msg], update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed: commitment_msg, }, }); @@ -1414,6 +1459,7 @@ impl ChannelManager { update_fulfill_htlcs: vec![msg], update_fail_htlcs: Vec::new(), update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed: commitment_msg, } }); @@ -1463,7 +1509,8 @@ impl ChannelManager { } }; - let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger)).map_err(|e| MsgHandleErrInternal::from_no_close(e))?; + let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger)) + .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?; let accept_msg = channel.get_accept_channel(); channel_state.by_id.insert(channel.channel_id(), channel); Ok(accept_msg) @@ -1478,7 +1525,8 @@ impl ChannelManager { //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } - chan.accept_channel(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + chan.accept_channel(&msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?; (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id()) }, //TODO: same as above @@ -1568,7 +1616,8 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.funding_locked(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + chan.funding_locked(&msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; return Ok(self.get_announcement_sigs(chan)); }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) @@ -1688,7 +1737,8 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fulfill_htlc(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?.clone() + chan.update_fulfill_htlc(&msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone() }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) }; @@ -1704,7 +1754,8 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() 
}).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) + chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) }?; @@ -1776,7 +1827,11 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + if (msg.failure_code & 0x8000) != 0 { + return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION set", msg.channel_id)); + } + chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; Ok(()) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) @@ -1864,7 +1919,7 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) + chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } @@ -1884,7 +1939,7 @@ impl ChannelManager { let our_node_id = self.get_our_node_id(); let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()) - .map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; let were_node_one = announcement.node_id_1 == our_node_id; let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap(); @@ -1918,7 +1973,8 @@ impl ChannelManager { if chan.get_their_node_id() != *their_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; (Ok((funding_locked, revoke_and_ack, commitment_update)), channel_monitor) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) @@ -1931,6 +1987,44 @@ impl ChannelManager { } res } + + /// Begin the update_fee process. Allowed only on an outbound channel. + /// If successful, will generate an UpdateHTLCs event, so you should probably poll + /// PeerManager::process_events afterwards. + /// Note: This API is likely to change!
+ #[doc(hidden)] + pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> { + let mut channel_state = self.channel_state.lock().unwrap(); + match channel_state.by_id.get_mut(&channel_id) { + None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}), + Some(chan) => { + if !chan.is_usable() { + return Err(APIError::APIMisuseError{err: "Channel is not in a usable state"}); + } + if !chan.is_outbound() { + return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); + } + if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::UpdateHTLCs { + node_id: chan.get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + } + }, + } + Ok(()) + } } impl events::EventsProvider for ChannelManager { @@ -2606,6 +2700,17 @@ mod tests { (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4) } + macro_rules! check_spends { + ($tx: expr, $spends_tx: expr) => { + { + let mut funding_tx_map = HashMap::new(); + let spends_tx = $spends_tx; + funding_tx_map.insert(spends_tx.txid(), spends_tx); + $tx.verify(&funding_tx_map).unwrap(); + } + } + } + fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) { let (node_a, broadcaster_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster) } else { (&outbound_node.node, &outbound_node.tx_broadcaster) }; let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) }; @@ -2649,9 +2754,7 @@ mod tests { tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); } assert_eq!(tx_a, tx_b); - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(funding_tx.txid(), funding_tx); - tx_a.verify(&funding_tx_map).unwrap(); + check_spends!(tx_a, funding_tx); let events_2 = node_a.get_and_clear_pending_events(); assert_eq!(events_2.len(), 1); @@ -2682,10 +2785,11 @@ mod tests { impl SendEvent { fn from_event(event: Event) -> SendEvent { match event { - Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, commitment_signed } } => { + Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); SendEvent { node_id: node_id, msgs: update_add_htlcs, commitment_msg: commitment_signed } }, _ => panic!("Unexpected event type!"), @@ -2693,36 +2797,28 @@ mod tests { } } + macro_rules!
check_added_monitors { + ($node: expr, $count: expr) => { + { + let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), $count); + added_monitors.clear(); + } + } + } + macro_rules! commitment_signed_dance { ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => { { - { - let added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); - assert!(added_monitors.is_empty()); - } + check_added_monitors!($node_a, 0); let (as_revoke_and_ack, as_commitment_signed) = $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); - { - let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } - { - let added_monitors = $node_b.chan_monitor.added_monitors.lock().unwrap(); - assert!(added_monitors.is_empty()); - } + check_added_monitors!($node_a, 1); + check_added_monitors!($node_b, 0); assert!($node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); - { - let mut added_monitors = $node_b.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!($node_b, 1); let (bs_revoke_and_ack, bs_none) = $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); assert!(bs_none.is_none()); - { - let mut added_monitors = $node_b.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!($node_b, 1); if $fail_backwards { assert!($node_a.node.get_and_clear_pending_events().is_empty()); } @@ -2741,24 +2837,26 @@ mod tests { } } + macro_rules! 
get_payment_preimage_hash { + ($node: expr) => { + { + let payment_preimage = [*$node.network_payment_count.borrow(); 32]; + *$node.network_payment_count.borrow_mut() += 1; + let mut payment_hash = [0; 32]; + let mut sha = Sha256::new(); + sha.input(&payment_preimage[..]); + sha.result(&mut payment_hash); + (payment_preimage, payment_hash) + } + } + } + fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) { - let our_payment_preimage = [*origin_node.network_payment_count.borrow(); 32]; - *origin_node.network_payment_count.borrow_mut() += 1; - let our_payment_hash = { - let mut sha = Sha256::new(); - sha.input(&our_payment_preimage[..]); - let mut ret = [0; 32]; - sha.result(&mut ret); - ret - }; + let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node); let mut payment_event = { origin_node.node.send_payment(route, our_payment_hash).unwrap(); - { - let mut added_monitors = origin_node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(origin_node, 1); let mut events = origin_node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -2770,11 +2868,7 @@ mod tests { assert_eq!(node.node.get_our_node_id(), payment_event.node_id); node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - { - let added_monitors = node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 0); - } - + check_added_monitors!(node, 0); commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); let events_1 = node.node.get_and_clear_pending_events(); @@ -2798,11 +2892,7 @@ mod tests { _ => panic!("Unexpected event"), } } else { - { - let mut added_monitors = node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(node, 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); } @@ -2815,25 +2905,17 @@ mod tests { fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: [u8; 32]) { assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage)); - { - let mut added_monitors = expected_route.last().unwrap().chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(expected_route.last().unwrap(), 1); let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None; macro_rules! 
update_fulfill_dance { ($node: expr, $prev_node: expr, $last_node: expr) => { { $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap(); - { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); - if $last_node { - assert_eq!(added_monitors.len(), 0); - } else { - assert_eq!(added_monitors.len(), 1); - } - added_monitors.clear(); + if $last_node { + check_added_monitors!($node, 0); + } else { + check_added_monitors!($node, 1); } commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false); } @@ -2852,11 +2934,12 @@ mod tests { if !skip_last || idx != expected_route.len() - 1 { assert_eq!(events.len(), 1); match events[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => { + Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); expected_next_node = node_id.clone(); next_msgs = Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone())); }, @@ -2908,15 +2991,7 @@ mod tests { assert_eq!(hop.pubkey, node.node.get_our_node_id()); } - let our_payment_preimage = [*origin_node.network_payment_count.borrow(); 32]; - *origin_node.network_payment_count.borrow_mut() += 1; - let our_payment_hash = { - let mut sha = Sha256::new(); - sha.input(&our_payment_preimage[..]); - let mut ret = [0; 32]; - sha.result(&mut ret); - ret - }; + let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node); let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap(); match err { @@ -2932,11 +3007,7 @@ mod tests { fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) { assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash)); - { - let mut added_monitors = expected_route.last().unwrap().chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(expected_route.last().unwrap(), 1); let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None; macro_rules! 
update_fail_dance { @@ -2963,11 +3034,12 @@ mod tests { if !skip_last || idx != expected_route.len() - 1 { assert_eq!(events.len(), 1); match events[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => { + Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); expected_next_node = node_id.clone(); next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone())); }, @@ -3031,6 +3103,528 @@ mod tests { nodes } + #[test] + fn test_async_inbound_update_fee() { + let mut nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + // A B + // update_fee -> + // send (1) commitment_signed -. + // <- update_add_htlc/commitment_signed + // send (2) RAA (awaiting remote revoke) -. + // (1) commitment_signed is delivered -> + // .- send (3) RAA (awaiting remote revoke) + // (2) RAA is delivered -> + // .- send (4) commitment_signed + // <- (3) RAA is delivered + // send (5) commitment_signed -. + // <- (4) commitment_signed is delivered + // send (6) RAA -. + // (5) commitment_signed is delivered -> + // <- RAA + // (6) RAA is delivered -> + + // First nodes[0] generates an update_fee + nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { // (1) + Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap(); + check_added_monitors!(nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack + check_added_monitors!(nodes[0], 1); + + // deliver(1), generate (3): + let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting nodes[0] revoke_and_ack + check_added_monitors!(nodes[1], 1); + + let bs_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2) + assert!(bs_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fee.is_none()); // (4) + check_added_monitors!(nodes[1], 1); + + let as_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); // deliver (3) + assert!(as_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fee.is_none()); // (5) + check_added_monitors!(nodes[0], 1); + + let (as_second_revoke, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.unwrap().commitment_signed).unwrap(); // deliver (4) + assert!(as_second_commitment_signed.is_none()); // only (6) + check_added_monitors!(nodes[0], 1); + + let (bs_second_revoke, bs_second_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.unwrap().commitment_signed).unwrap(); // deliver (5) + assert!(bs_second_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + let events_2 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment + _ => panic!("Unexpected event"), + } + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), 
&as_second_revoke).unwrap().is_none()); // deliver (6) + check_added_monitors!(nodes[1], 1); + } + + #[test] + fn test_update_fee_unordered_raa() { + // Just the intro to the previous test followed by an out-of-order RAA (which caused a + // crash in an earlier version of the update_fee patch) + let mut nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + // First nodes[0] generates an update_fee + nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let update_msg = match events_0[0] { // (1) + Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { + update_fee.as_ref() + }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... + let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap(); + check_added_monitors!(nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack + check_added_monitors!(nodes[0], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none()); // deliver (2) + check_added_monitors!(nodes[1], 1); + + // We can't continue, sadly, because our (1) now has a bogus signature + } + + #[test] + fn test_multi_flight_update_fee() { + let nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // A B + // update_fee/commitment_signed -> + // .- send (1) RAA and (2) commitment_signed + // update_fee (never committed) -> + // (3) update_fee -> + // We have to manually generate the above update_fee; it is allowed by the protocol, but we + // don't track which updates correspond to which revoke_and_ack responses so we're in + // AwaitingRAA mode and will not generate the update_fee yet. + // <- (1) RAA delivered + // (3) is generated and (4) CS is sent -.
+ // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't + // know the per_commitment_point to use for it. + // <- (2) commitment_signed delivered + // revoke_and_ack -> + // B should send no response here + // (4) commitment_signed delivered -> + // <- RAA/commitment_signed delivered + // revoke_and_ack -> + + // First nodes[0] generates an update_fee + let initial_feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1) + Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + (update_fee.as_ref().unwrap(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + + // Deliver first update_fee/commitment_signed pair, generating (1) and (2): + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap(); + let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap(); + check_added_monitors!(nodes[1], 1); + + // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment + // transaction: + nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + + // Create the (3) update_fee message that nodes[0] will generate before it does... + let mut update_msg_2 = msgs::UpdateFee { + channel_id: update_msg_1.channel_id.clone(), + feerate_per_kw: (initial_feerate + 30) as u32, + }; + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap(); + + update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32; + // Deliver (3) + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap(); + + // Deliver (1), generating (3) and (4) + let as_second_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); + check_added_monitors!(nodes[0], 1); + assert!(as_second_update.as_ref().unwrap().update_add_htlcs.is_empty()); + assert!(as_second_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); + assert!(as_second_update.as_ref().unwrap().update_fail_htlcs.is_empty()); + assert!(as_second_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); + // Check that the newly-generated update_fee matches the one we delivered: + assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id); + assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw); + + // Deliver (2) commitment_signed + let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), bs_commitment_signed.as_ref().unwrap()).unwrap(); + check_added_monitors!(nodes[0], 1); + assert!(as_commitment_signed.is_none()); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + + // Deliver (4) + let (bs_second_revoke, bs_second_commitment) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.unwrap().commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); +
assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + let (as_second_revoke, as_second_commitment) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment.unwrap()).unwrap(); + assert!(as_second_commitment.is_none()); + check_added_monitors!(nodes[0], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + } + + #[test] + fn test_update_fee_vanilla() { + let nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + let feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let commitment_signed = commitment_signed.unwrap(); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[1], 1); + } + + #[test] + fn test_update_fee_with_fundee_update_add_htlc() { + let mut nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! 
get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + let feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + check_added_monitors!(nodes[0], 1); + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let commitment_signed = commitment_signed.unwrap(); + check_added_monitors!(nodes[1], 1); + + let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap(); + + let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[1]); + + // nothing happens since node[1] is in AwaitingRemoteRevoke + nodes[1].node.send_payment(route, our_payment_hash).unwrap(); + { + let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), 0); + added_monitors.clear(); + } + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 0); + // node[1] has nothing to do + + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + // AwaitingRemoteRevoke ends here + + let commitment_update = resp_option.unwrap(); + assert_eq!(commitment_update.update_add_htlcs.len(), 1); + assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); + assert_eq!(commitment_update.update_fail_htlcs.len(), 0); + assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0); + assert_eq!(commitment_update.update_fee.is_none(), true); + + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap(); + let (revoke, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + let commitment_signed = commitment_signed.unwrap(); + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(resp_option.is_none()); + + let (revoke, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(commitment_signed.is_none()); + let 
resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap(); + check_added_monitors!(nodes[0], 1); + assert!(resp_option.is_none()); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[0].node.process_pending_htlc_forwards(); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentReceived { .. } => { }, + _ => panic!("Unexpected event"), + }; + + claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage); + + send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); + send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); + close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + } + + #[test] + fn test_update_fee() { + let nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // A B + // (1) update_fee/commitment_signed -> + // <- (2) revoke_and_ack + // .- send (3) commitment_signed + // (4) update_fee/commitment_signed -> + // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke) + // <- (3) commitment_signed delivered + // send (6) revoke_and_ack -. + // <- (5) deliver revoke_and_ack + // (6) deliver revoke_and_ack -> + // .- send (7) commitment_signed in response to (4) + // <- (7) deliver commitment_signed + // revoke_and_ack -> + + // Create and deliver (1)... + let feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + // Generate (2) and (3): + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let commitment_signed_0 = commitment_signed.unwrap(); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + + // Deliver (2): + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + // Create and deliver (4)... 
+ nodes[0].node.update_fee(channel_id, feerate+30).unwrap(); + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + // ... creating (5) + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + + // Handle (3), creating (6): + let (revoke_msg_0, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + // Deliver (5): + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + // Deliver (6), creating (7): + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap(); + let commitment_signed = resp_option.unwrap().commitment_signed; + check_added_monitors!(nodes[1], 1); + + // Deliver (7) + let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[1], 1); + + assert_eq!(get_feerate!(nodes[0]), feerate + 30); + assert_eq!(get_feerate!(nodes[1]), feerate + 30); + close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + } + #[test] fn fake_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -3189,9 +3783,7 @@ mod tests { let mut res = Vec::with_capacity(2); node_txn.retain(|tx| { if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() { - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(chan.3.txid(), chan.3.clone()); - tx.verify(&funding_tx_map).unwrap(); + check_spends!(tx, chan.3.clone()); if commitment_tx.is_none() { res.push(tx.clone()); } @@ -3207,9 +3799,7 @@ mod tests { if has_htlc_tx != HTLCType::NONE { node_txn.retain(|tx| { if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() { - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(res[0].txid(), res[0].clone()); - tx.verify(&funding_tx_map).unwrap(); + check_spends!(tx, res[0].clone()); if has_htlc_tx == HTLCType::TIMEOUT { assert!(tx.lock_time != 0); } else { @@ -3233,9 +3823,7 @@ mod tests { assert_eq!(node_txn.len(), 1); node_txn.retain(|tx| { if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() { - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(revoked_tx.txid(), revoked_tx.clone()); - tx.verify(&funding_tx_map).unwrap(); + check_spends!(tx, revoked_tx.clone()); false } else { true } }); @@ -3251,10 
+3839,7 @@ mod tests { for tx in prev_txn { if node_txn[0].input[0].previous_output.txid == tx.txid() { - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(tx.txid(), tx.clone()); - node_txn[0].verify(&funding_tx_map).unwrap(); - + check_spends!(node_txn[0], tx.clone()); assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output assert_eq!(tx.input.len(), 1); // must spend a commitment tx @@ -3343,11 +3928,7 @@ mod tests { ($node: expr, $prev_node: expr, $preimage: expr) => { { assert!($node.node.claim_funds($preimage)); - { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!($node, 1); let events = $node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -3441,9 +4022,7 @@ mod tests { assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone()); - node_txn[0].verify(&funding_tx_map).unwrap(); + check_spends!(node_txn[0], revoked_local_txn[0].clone()); node_txn.swap_remove(0); } test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE); @@ -3481,13 +4060,8 @@ mod tests { assert_eq!(node_txn[0], node_txn[2]); - let mut revoked_tx_map = HashMap::new(); - revoked_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone()); - node_txn[0].verify(&revoked_tx_map).unwrap(); - - revoked_tx_map.clear(); - revoked_tx_map.insert(chan_1.3.txid(), chan_1.3.clone()); - node_txn[1].verify(&revoked_tx_map).unwrap(); + check_spends!(node_txn[0], revoked_local_txn[0].clone()); + check_spends!(node_txn[1], chan_1.3.clone()); // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); @@ -3516,6 +4090,7 @@ mod tests { assert_eq!(revoked_local_txn[1].input.len(), 1); assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid()); assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout + check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone()); //Revoke the old state claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1); @@ -3529,11 +4104,9 @@ mod tests { let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 4); - let mut revoked_tx_map = HashMap::new(); - revoked_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone()); - assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs - node_txn[0].verify(&revoked_tx_map).unwrap(); + check_spends!(node_txn[0], revoked_local_txn[0].clone()); + assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning let mut witness_lens = BTreeSet::new(); @@ -3561,6 +4134,77 @@ mod tests { assert_eq!(nodes[1].node.list_channels().len(), 0); } + #[test] + fn claim_htlc_outputs_single_tx() { + // Node revoked old state, htlcs have timed out, claim each of them in separated justice tx + let nodes = create_network(2); + + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + + // Rebalance the network to generate htlc in the two directions + 
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this + // time as two different claim transactions, as we're going to time out the HTLCs given a high current height + let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0; + + // Get the will-be-revoked local txn from nodes[0] + let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + + //Revoke the old state + claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1); + + { + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200); + + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 12); // ChannelManager: 2, ChannelMonitor: 10 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan) + + assert_eq!(node_txn[0], node_txn[7]); + assert_eq!(node_txn[1], node_txn[8]); + assert_eq!(node_txn[2], node_txn[9]); + assert_eq!(node_txn[3], node_txn[10]); + assert_eq!(node_txn[4], node_txn[11]); + assert_eq!(node_txn[3], node_txn[5]); //local commitment tx + htlc timeout tx broadcast by ChannelManager + assert_eq!(node_txn[4], node_txn[6]); + + assert_eq!(node_txn[0].input.len(), 1); + assert_eq!(node_txn[1].input.len(), 1); + assert_eq!(node_txn[2].input.len(), 1); + + let mut revoked_tx_map = HashMap::new(); + revoked_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone()); + node_txn[0].verify(&revoked_tx_map).unwrap(); + node_txn[1].verify(&revoked_tx_map).unwrap(); + node_txn[2].verify(&revoked_tx_map).unwrap(); + + let mut witness_lens = BTreeSet::new(); + witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len()); + witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len()); + witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len()); + assert_eq!(witness_lens.len(), 3); + assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local + assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC + assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC + + assert_eq!(node_txn[3].input.len(), 1); + check_spends!(node_txn[3], chan_1.3.clone()); + + assert_eq!(node_txn[4].input.len(), 1); + let witness_script = node_txn[4].input[0].witness.last().unwrap(); + assert_eq!(witness_script.len(), 133); //Spending an offered htlc output + assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid()); + assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid); + assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid); + } + get_announce_close_broadcast_events(&nodes, 0, 1); + assert_eq!(nodes[0].node.list_channels().len(), 0); + assert_eq!(nodes[1].node.list_channels().len(), 0); + } + #[test] fn test_htlc_ignore_latest_remote_commitment()
{ // Test that HTLC transactions spending the latest remote commitment transaction are simply @@ -3612,23 +4256,11 @@ mod tests { let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap(); - let our_payment_preimage = [*nodes[0].network_payment_count.borrow(); 32]; - *nodes[0].network_payment_count.borrow_mut() += 1; - let our_payment_hash = { - let mut sha = Sha256::new(); - sha.input(&our_payment_preimage[..]); - let mut ret = [0; 32]; - sha.result(&mut ret); - ret - }; + let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); let mut payment_event = { nodes[0].node.send_payment(route, our_payment_hash).unwrap(); - { - let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -3653,20 +4285,10 @@ mod tests { payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); - { - let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } - + check_added_monitors!(nodes[1], 1); nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); - - { - let mut added_monitors = nodes[2].chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(nodes[2], 1); // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous // state or updated nodes[1]'s state.
Now force-close and broadcast that commitment/HTLC @@ -3717,9 +4339,8 @@ mod tests { assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid()); assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(tx.txid(), tx); - node_txn[0].verify(&funding_tx_map).unwrap(); + + check_spends!(node_txn[0], tx); } #[test] @@ -3766,28 +4387,20 @@ mod tests { for msg in reestablish_1 { resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap()); } - { - let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap(); - if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 { - assert_eq!(added_monitors.len(), 1); - } else { - assert!(added_monitors.is_empty()); - } - added_monitors.clear(); + if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 { + check_added_monitors!(node_b, 1); + } else { + check_added_monitors!(node_b, 0); } let mut resp_2 = Vec::new(); for msg in reestablish_2 { resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap()); } - { - let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap(); - if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 { - assert_eq!(added_monitors.len(), 1); - } else { - assert!(added_monitors.is_empty()); - } - added_monitors.clear(); + if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 { + check_added_monitors!(node_a, 1); + } else { + check_added_monitors!(node_a, 0); } // We don't yet support both needing updates, as that would require a different commitment dance:
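
A note on the new error plumbing above: from_chan_no_close and from_chan_maybe_close apply the same mapping from a ChannelError to a wire-level action (Ignore drops the offending message; Close hands the peer an error message naming the channel), differing only in the needs_channel_force_close flag they set. A minimal self-contained sketch of that mapping, using local stand-in types rather than the real ln::channel/ln::msgs definitions:

    enum ChannelError { Ignore(&'static str), Close(&'static str) }

    enum ErrorAction {
        // Drop the offending message; the channel stays open.
        IgnoreError,
        // Tell the peer which channel failed and why.
        SendErrorMessage { channel_id: [u8; 32], data: String },
    }

    fn action_for(err: &ChannelError, channel_id: [u8; 32]) -> ErrorAction {
        match err {
            ChannelError::Ignore(_) => ErrorAction::IgnoreError,
            ChannelError::Close(msg) => ErrorAction::SendErrorMessage { channel_id, data: msg.to_string() },
        }
    }

from_chan_no_close then pairs the resulting action with needs_channel_force_close: false, while from_chan_maybe_close sets it to true.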
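The get_payment_preimage_hash! test macro introduced above derives each payment hash as the SHA-256 of a fresh 32-byte preimage. The same derivation as a standalone function, using the rust-crypto Digest API the macro itself calls (this assumes the crypto crate, which these tests already use):

    use crypto::digest::Digest;
    use crypto::sha2::Sha256;

    // payment_hash = SHA256(payment_preimage), as used for HTLCs throughout.
    fn payment_hash_from_preimage(payment_preimage: &[u8; 32]) -> [u8; 32] {
        let mut sha = Sha256::new();
        sha.input(&payment_preimage[..]);
        let mut payment_hash = [0; 32];
        sha.result(&mut payment_hash);
        payment_hash
    }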
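Finally, the intended calling sequence for the new ChannelManager::update_fee: per its doc comment, a successful call only queues an UpdateHTLCs event, and the update_fee/commitment_signed pair reaches the peer once events are processed. A sketch of that ordering, with hypothetical one-method traits standing in for the real ChannelManager and PeerManager (whose actual signatures differ):

    // Stand-ins for the two objects involved; not the real rust-lightning types.
    trait FeeUpdater {
        fn update_fee(&self, channel_id: [u8; 32], feerate_per_kw: u64) -> Result<(), String>;
    }
    trait EventPump {
        fn process_events(&self);
    }

    fn bump_channel_fee<M: FeeUpdater, P: EventPump>(manager: &M, peers: &P, channel_id: [u8; 32], feerate_per_kw: u64) -> Result<(), String> {
        // Only valid on a usable, outbound channel (see the checks in update_fee above).
        manager.update_fee(channel_id, feerate_per_kw)?;
        // Flush the queued UpdateHTLCs event so the update_fee/commitment_signed
        // pair is actually sent to the counterparty.
        peers.process_events();
        Ok(())
    }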