From: Antoine Riard
Date: Tue, 5 Nov 2019 23:51:05 +0000 (-0500)
Subject: Drop Result for ChannelMessageHandler methods
X-Git-Tag: v0.0.12~161^2
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=refs%2Fheads%2F2020-01-398-fixups;p=rust-lightning

Drop Result for ChannelMessageHandler methods

Simplify the interface between ChannelMessageHandler and PeerManager by
switching all ChannelMessageHandler errors to HandleError events sent
internally, instead of being returned. With further refactors in Router
and PeerChannelEncryptor, error handling on the PeerManager side will no
longer be split between try_potential_handleerror and HandleError
processing.

Inside ChannelManager, we now log MsgHandleErrInternal and send an
ErrorAction to PeerManager. At a high level, this should let API clients
be more flexible by polling events instead of waiting on function-call
return values.

We also update the handle_error macro to take channel_state_lock from
the caller, which should avoid deadlock potential in some edge cases.

Filter out IgnoreError in the handle_error macro, and update tests
accordingly.
---

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 52d5a658d..55a78b29d 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -29,7 +29,7 @@ use lightning::ln::channelmonitor;
 use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate};
 use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, ChannelManagerReadArgs};
 use lightning::ln::router::{Route, RouteHop};
-use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, LightningError, UpdateAddHTLC, LocalFeatures};
+use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, UpdateAddHTLC, LocalFeatures, ErrorAction};
 use lightning::util::enforcing_trait_impls::EnforcingChannelKeys;
 use lightning::util::events;
 use lightning::util::logger::Logger;
@@ -252,7 +252,7 @@ pub fn do_test(data: &[u8]) {
 				} else { panic!("Wrong event type"); }
 			};
-			$dest.handle_open_channel(&$source.get_our_node_id(), LocalFeatures::new(), &open_channel).unwrap();
+			$dest.handle_open_channel(&$source.get_our_node_id(), LocalFeatures::new(), &open_channel);
 			let accept_channel = {
 				let events = $dest.get_and_clear_pending_msg_events();
 				assert_eq!(events.len(), 1);
@@ -261,7 +261,7 @@ pub fn do_test(data: &[u8]) {
 				} else { panic!("Wrong event type"); }
 			};
-			$source.handle_accept_channel(&$dest.get_our_node_id(), LocalFeatures::new(), &accept_channel).unwrap();
+			$source.handle_accept_channel(&$dest.get_our_node_id(), LocalFeatures::new(), &accept_channel);
 			{
 				let events = $source.get_and_clear_pending_events();
 				assert_eq!(events.len(), 1);
@@ -282,7 +282,7 @@ pub fn do_test(data: &[u8]) {
 					msg.clone()
 				} else { panic!("Wrong event type"); }
 			};
-			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created).unwrap();
+			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);

 			let funding_signed = {
 				let events = $dest.get_and_clear_pending_msg_events();
@@ -291,7 +291,7 @@ pub fn do_test(data: &[u8]) {
 					msg.clone()
 				} else { panic!("Wrong event type"); }
 			};
-			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed).unwrap();
+			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);

 			{
 				let events = $source.get_and_clear_pending_events();
@@ -330,7 +330,7 @@ pub fn do_test(data: &[u8]) {
 				if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
 					for node in $nodes.iter() {
 						if node.get_our_node_id() == *node_id {
-							node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg).unwrap();
+							node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg);
 						}
 					}
 				} else { panic!("Wrong event type"); }
@@ -381,16 +381,6 @@ pub fn do_test(data: &[u8]) {
 	let mut node_c_ser = VecWriter(Vec::new());
 	nodes[2].write(&mut node_c_ser).unwrap();

-	macro_rules! test_err {
-		($res: expr) => {
-			match $res {
-				Ok(()) => {},
-				Err(LightningError { action: ErrorAction::IgnoreError, .. }) => { },
-				_ => { $res.unwrap() },
-			}
-		}
-	}
-
 	macro_rules! test_return {
 		() => { {
 			assert_eq!(nodes[0].list_channels().len(), 1);
@@ -470,7 +460,7 @@ pub fn do_test(data: &[u8]) {
 							assert!(update_fee.is_none());
 							for update_add in update_add_htlcs {
 								if !$corrupt_forward {
-									test_err!(dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add));
+									dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add);
 								} else {
 									// Corrupt the update_add_htlc message so that its HMAC
 									// check will fail and we generate a
@@ -479,33 +469,33 @@ pub fn do_test(data: &[u8]) {
 									let mut msg_ser = update_add.encode();
 									msg_ser[1000] ^= 0xff;
 									let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
-									test_err!(dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg));
+									dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
 								}
 							}
 							for update_fulfill in update_fulfill_htlcs {
-								test_err!(dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill));
+								dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill);
 							}
 							for update_fail in update_fail_htlcs {
-								test_err!(dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail));
+								dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail);
 							}
 							for update_fail_malformed in update_fail_malformed_htlcs {
-								test_err!(dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed));
+								dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed);
 							}
-							test_err!(dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed));
+							dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
 						}
 					}
 				},
 				events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 					for dest in nodes.iter() {
 						if dest.get_our_node_id() == *node_id {
-							test_err!(dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg));
+							dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
 						}
 					}
 				},
 				events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
 					for dest in nodes.iter() {
 						if dest.get_our_node_id() == *node_id {
-							test_err!(dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg));
+							dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
 						}
 					}
 				},
@@ -516,6 +506,10 @@ pub fn do_test(data: &[u8]) {
 					// Can be generated due to a payment forward being rejected due to a
 					// channel having previously failed a monitor update
 				},
+				events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {
+					// Can be generated at any processing step to send back an error, disconnect
+					// peer or just ignore
+				},
 				_ => panic!("Unhandled message event"),
 			}
 		}
@@ -532,6 +526,7 @@ pub fn do_test(data: &[u8]) {
 				events::MessageSendEvent::SendChannelReestablish { .. } => {},
 				events::MessageSendEvent::SendFundingLocked { .. } => {},
 				events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+				events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
 				_ => panic!("Unhandled message event"),
 			}
 		}
@@ -544,6 +539,7 @@ pub fn do_test(data: &[u8]) {
 				events::MessageSendEvent::SendChannelReestablish { .. } => {},
 				events::MessageSendEvent::SendFundingLocked { .. } => {},
 				events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+				events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
 				_ => panic!("Unhandled message event"),
 			}
 		}
@@ -565,6 +561,7 @@ pub fn do_test(data: &[u8]) {
 				},
 				events::MessageSendEvent::SendFundingLocked { .. } => false,
 				events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
+				events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => false,
 				_ => panic!("Unhandled message event"),
 			};
 			if push { msg_sink.push(event); }
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 205f1a4bd..8849c2ed5 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -6,7 +6,7 @@ use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
 use ln::channelmonitor::ChannelMonitorUpdateErr;
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, LocalFeatures, RoutingMessageHandler};
+use ln::msgs::{ChannelMessageHandler, LocalFeatures, RoutingMessageHandler, ErrorAction};
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
@@ -76,7 +76,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	assert_eq!(events_2.len(), 1);
 	let payment_event = SendEvent::from_event(events_2.pop().unwrap());
 	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

 	expect_pending_htlcs_forwardable!(nodes[1]);
@@ -113,7 +113,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
 	nodes[0].node.test_restore_channel_monitor();
 	check_added_monitors!(nodes[0], 1);
-	check_closed_broadcast!(nodes[0]);
+	check_closed_broadcast!(nodes[0], false);

 	// TODO: Once we hit the chain with the failure transaction we should check that we get a
 	// PaymentFailed event
@@ -180,7 +180,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 		assert!(update_fee.is_none());

 		if (disconnect_count & 16) == 0 {
-			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
 			let events_3 = nodes[0].node.get_and_clear_pending_events();
 			assert_eq!(events_3.len(), 1);
 			match events_3[0] {
@@ -190,9 +190,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 				_ => panic!("Unexpected event"),
 			}

-			if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
-				assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
-			} else { panic!(); }
+			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
+			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+			nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 		}

 		(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
@@ -221,9 +221,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 		assert_eq!(reestablish_2.len(), 1);

-		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
 		let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
-		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
 		let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

 		assert!(as_resp.0.is_none());
@@ -243,10 +243,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 		assert_eq!(reestablish_2.len(), 1);

-		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
 		check_added_monitors!(nodes[0], 0);
 		let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
-		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
 		check_added_monitors!(nodes[1], 0);
 		let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
@@ -270,7 +270,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 		assert!(as_resp.1.is_none());

-		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap();
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
 		let events_3 = nodes[0].node.get_and_clear_pending_events();
 		assert_eq!(events_3.len(), 1);
 		match events_3[0] {
@@ -280,7 +280,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 			_ => panic!("Unexpected event"),
 		}

-		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap();
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
 		let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes
 		check_added_monitors!(nodes[0], 1);
@@ -315,8 +315,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
 	check_added_monitors!(nodes[1], 1);
@@ -335,7 +335,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	let bs_second_commitment_update;

 	macro_rules! handle_bs_raa { () => {
-		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
 		as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 		assert!(as_commitment_update.update_add_htlcs.is_empty());
 		assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
@@ -346,7 +346,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	} }

 	macro_rules! handle_initial_raa { () => {
-		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap();
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
 		bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 		assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
 		assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
@@ -410,21 +410,21 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 		}
 	}

-	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
 	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes
 	check_added_monitors!(nodes[0], 1);

-	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap();
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
 	let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes
 	check_added_monitors!(nodes[1], 1);

-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 1);

-	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap();
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[0], 1);
@@ -482,12 +482,12 @@ fn test_monitor_update_fail_cs() {
 	check_added_monitors!(nodes[0], 1);
 	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);

 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
-		assert_eq!(err, "Failed to update ChannelMonitor");
-	} else { panic!(); }
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -500,7 +500,7 @@ fn test_monitor_update_fail_cs() {
 	match responses[0] {
 		MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
 			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg).unwrap();
+			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
 			check_added_monitors!(nodes[0], 1);
 		},
 		_ => panic!("Unexpected event"),
@@ -515,9 +515,9 @@ fn test_monitor_update_fail_cs() {
 			assert_eq!(*node_id, nodes[0].node.get_our_node_id());

 			*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-			if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
-				assert_eq!(err, "Failed to update ChannelMonitor");
-			} else { panic!(); }
+			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+			nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 			check_added_monitors!(nodes[0], 1);
 			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 		},
@@ -529,7 +529,7 @@ fn test_monitor_update_fail_cs() {
 	check_added_monitors!(nodes[0], 1);

 	let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
 	check_added_monitors!(nodes[1], 1);

 	expect_pending_htlcs_forwardable!(nodes[1]);
@@ -561,13 +561,13 @@ fn test_monitor_update_fail_no_rebroadcast() {
 	check_added_monitors!(nodes[0], 1);
 	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
 	let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);

 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
-		assert_eq!(err, "Failed to update ChannelMonitor");
-	} else { panic!(); }
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
@@ -611,21 +611,21 @@ fn test_monitor_update_raa_while_paused() {
 	check_added_monitors!(nodes[1], 1);
 	let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));

-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]).unwrap();
-	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
 	check_added_monitors!(nodes[1], 1);
 	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

 	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]).unwrap();
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
-		assert_eq!(err, "Failed to update ChannelMonitor");
-	} else { panic!(); }
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[0], 1);

-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
-		assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
-	} else { panic!(); }
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
 	check_added_monitors!(nodes[0], 1);

 	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
@@ -633,24 +633,24 @@ fn test_monitor_update_raa_while_paused() {
 	check_added_monitors!(nodes[0], 1);

 	let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
 	check_added_monitors!(nodes[1], 1);
 	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

-	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1).unwrap();
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
 	check_added_monitors!(nodes[1], 1);
 	let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

-	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
 	check_added_monitors!(nodes[0], 1);
 	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

-	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa).unwrap();
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
 	check_added_monitors!(nodes[0], 1);
 	expect_pending_htlcs_forwardable!(nodes[0]);
 	expect_payment_received!(nodes[0], our_payment_hash_2, 1000000);

-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
 	check_added_monitors!(nodes[1], 1);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], our_payment_hash_1, 1000000);
@@ -682,7 +682,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	assert_eq!(updates.update_fail_htlcs.len(), 1);
 	assert!(updates.update_fail_malformed_htlcs.is_empty());
 	assert!(updates.update_fee.is_none());
-	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
 	let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
 	check_added_monitors!(nodes[0], 0);
@@ -695,7 +695,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	check_added_monitors!(nodes[0], 1);

 	let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);

 	expect_pending_htlcs_forwardable!(nodes[1]);
@@ -704,9 +704,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	// Now fail monitor updating.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
-		assert_eq!(err, "Failed to update ChannelMonitor");
-	} else { panic!(); }
+	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
@@ -721,7 +721,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
 	send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
 	check_added_monitors!(nodes[1], 0);
@@ -736,7 +736,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	assert!(updates.update_add_htlcs.is_empty());
 	assert!(updates.update_fee.is_none());
-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);

 	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -767,10 +767,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	check_added_monitors!(nodes[2], 1);

 	send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
-	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
-	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
-		assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
-	} else { panic!(); }
+	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	(Some(payment_preimage_4), Some(payment_hash_4))
@@ -819,7 +819,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	// Now deliver the new messages...

-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0).unwrap();
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
 	commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
 	let events_4 = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events_4.len(), 1);
@@ -828,12 +828,12 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 		assert!(rejected_by_dest);
 	} else { panic!("Unexpected event!"); }

-	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]).unwrap();
+	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
 	if test_ignore_second_cs {
-		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg).unwrap();
+		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
 		check_added_monitors!(nodes[2], 1);
 		let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-		nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap()).unwrap();
+		nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
 		check_added_monitors!(nodes[2], 1);
 		let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 		assert!(bs_cs.update_add_htlcs.is_empty());
@@ -842,7 +842,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 		assert!(bs_cs.update_fulfill_htlcs.is_empty());
 		assert!(bs_cs.update_fee.is_none());

-		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
 		check_added_monitors!(nodes[1], 1);
 		let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 		assert!(as_cs.update_add_htlcs.is_empty());
@@ -851,19 +851,19 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 		assert!(as_cs.update_fulfill_htlcs.is_empty());
 		assert!(as_cs.update_fee.is_none());

-		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
 		check_added_monitors!(nodes[1], 1);
 		let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

-		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
 		check_added_monitors!(nodes[2], 1);
 		let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

-		nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
+		nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
 		check_added_monitors!(nodes[2], 1);
 		assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());

-		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa).unwrap();
+		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
 		check_added_monitors!(nodes[1], 1);
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	} else {
@@ -886,7 +886,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	send_event = SendEvent::from_node(&nodes[1]);
 	assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
 	assert_eq!(send_event.msgs.len(), 1);
-	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);

 	expect_pending_htlcs_forwardable!(nodes[0]);
@@ -931,7 +931,7 @@ fn test_monitor_update_fail_reestablish() {
 	assert!(updates.update_fail_malformed_htlcs.is_empty());
 	assert!(updates.update_fee.is_none());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
-	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -943,11 +943,11 @@ fn test_monitor_update_fail_reestablish() {
 	let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 	let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

-	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);

-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
-		assert_eq!(err, "Failed to update ChannelMonitor");
-	} else { panic!(); }
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);

 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -959,9 +959,9 @@ fn test_monitor_update_fail_reestablish() {
 	assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
 	assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));

-	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);

-	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap();
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
 	check_added_monitors!(nodes[1], 0);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -975,7 +975,7 @@ fn test_monitor_update_fail_reestablish() {
 	assert!(updates.update_fail_malformed_htlcs.is_empty());
 	assert!(updates.update_fee.is_none());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
-	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);

 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -1013,18 +1013,18 @@ fn raa_no_response_awaiting_raa_state() {
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 	check_added_monitors!(nodes[1], 1);

 	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
 	check_added_monitors!(nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());

-	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
 	check_added_monitors!(nodes[0], 1);
 	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
@@ -1032,15 +1032,15 @@ fn raa_no_response_awaiting_raa_state() {
 	// nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
 	// then restore channel monitor updates.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
-		assert_eq!(err, "Failed to update ChannelMonitor");
-	} else { panic!(); }
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);

-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
-		assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
-	} else { panic!(); }
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);

 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
@@ -1058,36 +1058,36 @@ fn raa_no_response_awaiting_raa_state() {
 	check_added_monitors!(nodes[0], 0);
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

-	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
 	check_added_monitors!(nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());

-	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
 	check_added_monitors!(nodes[0], 1);
 	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 	check_added_monitors!(nodes[1], 1);
 	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

 	// Finally deliver the RAA to nodes[1] which results in a CS response to the last update
-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 	check_added_monitors!(nodes[1], 1);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_2, 1000000);
 	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

-	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
 	check_added_monitors!(nodes[0], 1);

-	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
 	check_added_monitors!(nodes[0], 1);
 	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 	check_added_monitors!(nodes[1], 1);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_3, 1000000);
@@ -1123,16 +1123,16 @@ fn claim_while_disconnected_monitor_update_fail() {
 	let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 	let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

-	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

 	// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
 	// update.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
-		assert_eq!(err, "Failed to update ChannelMonitor");
-	} else { panic!(); }
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1144,10 +1144,10 @@ fn claim_while_disconnected_monitor_update_fail() {
 	check_added_monitors!(nodes[0], 1);

 	let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap();
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
-		assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
-	} else { panic!(); }
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 	// Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
 	// until we've test_restore_channel_monitor'd and updated for the new commitment transaction.
@@ -1163,12 +1163,12 @@ fn claim_while_disconnected_monitor_update_fail() {
 	match bs_msgs[0] {
 		MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
 			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
-			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
 			check_added_monitors!(nodes[0], 1);
 			let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-			nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+			nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 			check_added_monitors!(nodes[1], 1);
 		},
 		_ => panic!("Unexpected event"),
@@ -1177,7 +1177,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 	match bs_msgs[1] {
 		MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg).unwrap();
+			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
 			check_added_monitors!(nodes[0], 1);
 		},
 		_ => panic!("Unexpected event"),
@@ -1186,20 +1186,20 @@ fn claim_while_disconnected_monitor_update_fail() {
 	let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

 	let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
 	check_added_monitors!(nodes[0], 1);
 	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

-	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed).unwrap();
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
 	check_added_monitors!(nodes[1], 1);
 	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 	check_added_monitors!(nodes[1], 1);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_2, 1000000);

-	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
 	check_added_monitors!(nodes[0], 1);

 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -1234,10 +1234,10 @@ fn monitor_failed_no_reestablish_response() {
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
-		assert_eq!(err, "Failed to update ChannelMonitor");
-	} else { panic!(); }
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);

 	// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
@@ -1251,21 +1251,21 @@ fn monitor_failed_no_reestablish_response() {
 	let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 	let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

-	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap();
-	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);

 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 	nodes[1].node.test_restore_channel_monitor();
 	check_added_monitors!(nodes[1], 1);
 	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

-	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
 	check_added_monitors!(nodes[0], 1);
-	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
 	check_added_monitors!(nodes[0], 1);

 	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 	check_added_monitors!(nodes[1], 1);

 	expect_pending_htlcs_forwardable!(nodes[1]);
@@ -1300,14 +1300,14 @@ fn first_message_on_recv_ordering() {
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());
 	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 	check_added_monitors!(nodes[1], 1);
 	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

-	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
 	check_added_monitors!(nodes[0], 1);

-	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
 	check_added_monitors!(nodes[0], 1);
 	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
@@ -1327,18 +1327,18 @@ fn first_message_on_recv_ordering() {
 	// Deliver the final RAA for the first payment, which does not require a response. RAAs
 	// generally require a commitment_signed, so the fact that we're expecting an opposite response
 	// to the next message also tests resetting the delivery order.
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
-		assert_eq!(err, "Failed to update ChannelMonitor");
-	} else { panic!(); }
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);

 	// Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
 	// RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
 	// the appropriate HTLC acceptance).
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
-		assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
-	} else { panic!(); }
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);

 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 	nodes[1].node.test_restore_channel_monitor();
@@ -1348,13 +1348,13 @@ fn first_message_on_recv_ordering() {
 	expect_payment_received!(nodes[1], payment_hash_1, 1000000);

 	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
 	check_added_monitors!(nodes[0], 1);
-	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
 	check_added_monitors!(nodes[0], 1);

 	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 	check_added_monitors!(nodes[1], 1);

 	expect_pending_htlcs_forwardable!(nodes[1]);
@@ -1396,11 +1396,14 @@ fn test_monitor_update_fail_claim() {
 	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());
-	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 0);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);

 	let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
-	nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]).unwrap();
+	nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);

 	let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
@@ -1425,7 +1428,7 @@ fn test_monitor_update_fail_claim() {
 	check_added_monitors!(nodes[1], 1);

 	let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]).unwrap();
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);

 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -1454,7 +1457,7 @@ fn test_monitor_update_on_pending_forwards() {
 	check_added_monitors!(nodes[2], 1);

 	let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]).unwrap();
+	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1466,21 +1469,22 @@ fn test_monitor_update_on_pending_forwards() {
 	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());
-	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);

 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);

 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 	nodes[1].node.test_restore_channel_monitor();
 	check_added_monitors!(nodes[1], 1);

 	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
-	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]).unwrap();
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);

 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -1520,26 +1524,28 @@ fn monitor_update_claim_fail_no_response() {
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());
-	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 	let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);

 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 	assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 	check_added_monitors!(nodes[1], 1);
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 0);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);

 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 	nodes[1].node.test_restore_channel_monitor();
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

-	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 	check_added_monitors!(nodes[1], 1);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_2, 1000000);

 	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);

 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -1565,8 +1571,8 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
 	let mut nodes = create_network(2, &[None, None]);

 	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43).unwrap();
-	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
-	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())).unwrap();
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
+	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
 	let
(temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
@@ -1577,7 +1583,7 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
 	check_added_monitors!(nodes[0], 1);
 
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())).unwrap();
+	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
 	check_added_monitors!(nodes[1], 1);
 
 	if restore_between_fails {
@@ -1595,23 +1601,19 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
 		assert!(restore_between_fails || !fail_on_generate); // We can't switch to good now (there's no monitor update)
 		assert!(fail_on_generate); // Somebody has to fail
 	}
-	let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
 	if fail_on_signed || !restore_between_fails {
-		if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = funding_signed_res.unwrap_err() {
-			if fail_on_generate && !restore_between_fails {
-				assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast");
-				check_added_monitors!(nodes[0], 0);
-			} else {
-				assert_eq!(err, "Failed to update ChannelMonitor");
-				check_added_monitors!(nodes[0], 1);
-			}
-		} else { panic!(); }
-
+		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+		if fail_on_generate && !restore_between_fails {
+			nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented funding_signed from allowing funding broadcast".to_string(), 1);
+			check_added_monitors!(nodes[0], 0);
+		} else {
+			nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
+			check_added_monitors!(nodes[0], 1);
+		}
 		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
 		nodes[0].node.test_restore_channel_monitor();
-	} else {
-		funding_signed_res.unwrap();
 	}
 
 	check_added_monitors!(nodes[0], 1);
@@ -1628,7 +1630,7 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
 
 	if confirm_a_first {
 		confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
-		nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())).unwrap();
+		nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
 	} else {
 		assert!(!restore_b_before_conf);
 		confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
@@ -1653,7 +1655,7 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
 	check_added_monitors!(nodes[1], 1);
 
 	let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
-		nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())).unwrap();
+		nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
 
 		confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
 		let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 417b857aa..aed442517 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -429,19 +429,22 @@ pub struct ChannelDetails {
 }
 
 macro_rules! handle_error {
-	($self: ident, $internal: expr) => {
+	($self: ident, $internal: expr, $their_node_id: expr, $locked_channel_state: expr) => {
 		match $internal {
 			Ok(msg) => Ok(msg),
 			Err(MsgHandleErrInternal { err, shutdown_finish }) => {
 				if let Some((shutdown_res, update_option)) = shutdown_finish {
 					$self.finish_force_close_channel(shutdown_res);
 					if let Some(update) = update_option {
-						let mut channel_state = $self.channel_state.lock().unwrap();
-						channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+						$locked_channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 							msg: update
 						});
 					}
 				}
+				log_error!($self, "{}", err.err);
+				if let msgs::ErrorAction::IgnoreError = err.action {
+				} else { $locked_channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: $their_node_id, action: err.action.clone() }); }
+				// Return the error in case a higher-level API needs it
 				Err(err)
 			},
 		}
@@ -1116,8 +1119,8 @@ impl ChannelManager {
 		let _ = self.total_consistency_lock.read().unwrap();
 
+		let mut channel_lock = self.channel_state.lock().unwrap();
 		let err: Result<(), _> = loop {
-			let mut channel_lock = self.channel_state.lock().unwrap();
 
 			let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
 				None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
@@ -1167,20 +1170,9 @@ impl ChannelManager {
 			return Ok(());
 		};
 
-		match handle_error!(self, err) {
+		match handle_error!(self, err, route.hops.first().unwrap().pubkey, channel_lock) {
 			Ok(_) => unreachable!(),
-			Err(e) => {
-				if let msgs::ErrorAction::IgnoreError = e.action {
-				} else {
-					log_error!(self, "Got bad keys: {}!", e.err);
-					let mut channel_state = self.channel_state.lock().unwrap();
-					channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-						node_id: route.hops.first().unwrap().pubkey,
-						action: e.action,
-					});
-				}
-				Err(APIError::ChannelUnavailable { err: e.err })
-			},
+			Err(e) => { Err(APIError::ChannelUnavailable { err: e.err }) }
 		}
 	}
 
@@ -1197,32 +1189,22 @@ impl ChannelManager {
 		let _ = self.total_consistency_lock.read().unwrap();
 
 		let (mut chan, msg, chan_monitor) = {
-			let (res, chan) = {
-				let mut channel_state = self.channel_state.lock().unwrap();
-				match channel_state.by_id.remove(temporary_channel_id) {
-					Some(mut chan) => {
-						(chan.get_outbound_funding_created(funding_txo)
-							.map_err(|e| if let ChannelError::Close(msg) = e {
-								MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
-							} else { unreachable!(); })
-						, chan)
-					},
-					None => return
-				}
+			let mut channel_state = self.channel_state.lock().unwrap();
+			let (res, chan) = match channel_state.by_id.remove(temporary_channel_id) {
+				Some(mut chan) => {
+					(chan.get_outbound_funding_created(funding_txo)
+						.map_err(|e| if let ChannelError::Close(msg) = e {
+							MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
+						} else { unreachable!(); })
+					, chan)
+				},
+				None => return
 			};
-			match handle_error!(self, res) {
+			match handle_error!(self, res, chan.get_their_node_id(), channel_state) {
 				Ok(funding_msg) => {
 					(chan, funding_msg.0, funding_msg.1)
 				},
-				Err(e) => {
-					log_error!(self, "Got bad signatures: {}!", e.err);
-					let mut channel_state = self.channel_state.lock().unwrap();
-					channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-						node_id: chan.get_their_node_id(),
-						action: e.action,
-					});
-					return;
-				},
+				Err(_) => { return; }
 			}
 		};
 		// Because we have exclusive ownership of the channel here we can release the channel_state
@@ -1230,17 +1212,12 @@ impl ChannelManager {
 		if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 			match e {
 				ChannelMonitorUpdateErr::PermanentFailure => {
-					match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None))) {
-						Err(e) => {
-							log_error!(self, "Failed to store ChannelMonitor update for funding tx generation");
-							let mut channel_state = self.channel_state.lock().unwrap();
-							channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-								node_id: chan.get_their_node_id(),
-								action: e.action,
-							});
-							return;
-						},
-						Ok(()) => unreachable!(),
+					{
+						let mut channel_state = self.channel_state.lock().unwrap();
+						match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None)), chan.get_their_node_id(), channel_state) {
+							Err(_) => { return; },
+							Ok(()) => unreachable!(),
+						}
 					}
 				},
 				ChannelMonitorUpdateErr::TemporaryFailure => {
@@ -1417,22 +1394,9 @@ impl ChannelManager {
 					},
 					ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
 				};
-				match handle_error!(self, err) {
+				match handle_error!(self, err, their_node_id, channel_state) {
 					Ok(_) => unreachable!(),
-					Err(e) => {
-						match e.action {
-							msgs::ErrorAction::IgnoreError => {},
-							_ => {
-								log_error!(self, "Got bad keys: {}!", e.err);
-								let mut channel_state = self.channel_state.lock().unwrap();
-								channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-									node_id: their_node_id,
-									action: e.action,
-								});
-							},
-						}
-						continue;
-					},
+					Err(_) => { continue; },
 				}
 			}
 		};
@@ -1489,19 +1453,10 @@ impl ChannelManager {
 			};
 		}
 
-		for (their_node_id, err) in handle_errors.drain(..) {
-			match handle_error!(self, err) {
-				Ok(_) => {},
-				Err(e) => {
-					if let msgs::ErrorAction::IgnoreError = e.action {
-					} else {
-						let mut channel_state = self.channel_state.lock().unwrap();
-						channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-							node_id: their_node_id,
-							action: e.action,
-						});
-					}
-				},
+		if handle_errors.len() > 0 {
+			let mut channel_state_lock = self.channel_state.lock().unwrap();
+			for (their_node_id, err) in handle_errors.drain(..)
{ + let _ = handle_error!(self, err, their_node_id, channel_state_lock); } } @@ -1754,19 +1709,7 @@ impl ChannelManager { return; }; - match handle_error!(self, err) { - Ok(_) => {}, - Err(e) => { - if let msgs::ErrorAction::IgnoreError = e.action { - } else { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: their_node_id, - action: e.action, - }); - } - }, - } + let _ = handle_error!(self, err, their_node_id, channel_state_lock); } /// Gets the node_id held by this ChannelManager @@ -1914,13 +1857,11 @@ impl ChannelManager { match channel_state.by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_local_features), channel_state, chan); (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, - //TODO: same as above hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) } }; @@ -1941,7 +1882,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.temporary_channel_id.clone()) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } (try_chan_entry!(self, chan.get_mut().funding_created(msg), channel_state, chan), chan.remove()) @@ -1993,7 +1933,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan); @@ -2019,7 +1958,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan); @@ -2052,7 +1990,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { if chan_entry.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg), channel_state, chan_entry); @@ -2099,7 +2036,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id.clone()) { 
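	// Note the division of labor: the internal_* handlers keep returning
	// Result<(), MsgHandleErrInternal>, while the public ChannelMessageHandler
	// wrappers below consume the error via handle_error!, which logs it and,
	// unless the action is ErrorAction::IgnoreError, queues a
	// MessageSendEvent::HandleError for the PeerManager to deliver.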
hash_map::Entry::Occupied(mut chan_entry) => { if chan_entry.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg), channel_state, chan_entry); @@ -2155,7 +2091,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } if !chan.get().is_usable() { @@ -2204,7 +2139,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) @@ -2222,7 +2156,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan); @@ -2238,7 +2171,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } if (msg.failure_code & 0x8000) == 0 { @@ -2257,7 +2189,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = @@ -2334,7 +2265,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); @@ -2379,7 +2309,6 @@ impl ChannelManager { match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } try_chan_entry!(self, chan.get_mut().update_fee(&*self.fee_estimator, &msg), 
channel_state, chan); @@ -2508,9 +2437,9 @@ impl ChannelManager { #[doc(hidden)] pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> { let _ = self.total_consistency_lock.read().unwrap(); + let mut channel_state_lock = self.channel_state.lock().unwrap(); let their_node_id; let err: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); match channel_state.by_id.entry(channel_id) { @@ -2549,20 +2478,9 @@ impl ChannelManager { return Ok(()) }; - match handle_error!(self, err) { + match handle_error!(self, err, their_node_id, channel_state_lock) { Ok(_) => unreachable!(), - Err(e) => { - if let msgs::ErrorAction::IgnoreError = e.action { - } else { - log_error!(self, "Got bad keys: {}!", e.err); - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: their_node_id, - action: e.action, - }); - } - Err(APIError::APIMisuseError { err: e.err }) - }, + Err(e) => { Err(APIError::APIMisuseError { err: e.err })} } } } @@ -2732,85 +2650,148 @@ impl ChainListener for ChannelManager { } impl ChannelMessageHandler for ChannelManager { - //TODO: Handle errors and close channel (or so) - fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), LightningError> { + fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_open_channel(their_node_id, their_local_features, msg)) + let res = self.internal_open_channel(their_node_id, their_local_features, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), LightningError> { + fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_accept_channel(their_node_id, their_local_features, msg)) + let res = self.internal_accept_channel(their_node_id, their_local_features, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), LightningError> { + fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_funding_created(their_node_id, msg)) + let res = self.internal_funding_created(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), LightningError> { + fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, 
self.internal_funding_signed(their_node_id, msg)) + let res = self.internal_funding_signed(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), LightningError> { + fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_funding_locked(their_node_id, msg)) + let res = self.internal_funding_locked(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), LightningError> { + fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_shutdown(their_node_id, msg)) + let res = self.internal_shutdown(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), LightningError> { + fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_closing_signed(their_node_id, msg)) + let res = self.internal_closing_signed(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> { + fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_update_add_htlc(their_node_id, msg)) + let res = self.internal_update_add_htlc(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> { + fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg)) + let res = self.internal_update_fulfill_htlc(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> { + fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg)) + let res = self.internal_update_fail_htlc(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = 
self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> { + fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg)) + let res = self.internal_update_fail_malformed_htlc(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), LightningError> { + fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_commitment_signed(their_node_id, msg)) + let res = self.internal_commitment_signed(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), LightningError> { + fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg)) + let res = self.internal_revoke_and_ack(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), LightningError> { + fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_update_fee(their_node_id, msg)) + let res = self.internal_update_fee(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> { + fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_announcement_signatures(their_node_id, msg)) + let res = self.internal_announcement_signatures(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } - fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), LightningError> { + fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_channel_reestablish(their_node_id, msg)) + let res = self.internal_channel_reestablish(their_node_id, msg); + if res.is_err() { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + 
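	// (All of these wrappers share the same shape; a local macro could express
	//  it once. A hypothetical sketch, not part of this patch:
	//
	//    macro_rules! handle_message_res {
	//        ($self: ident, $res: expr, $their_node_id: expr) => {
	//            if $res.is_err() {
	//                let mut channel_state_lock = $self.channel_state.lock().unwrap();
	//                let _ = handle_error!($self, $res, *$their_node_id, channel_state_lock);
	//            }
	//        }
	//    }
	//  )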
let _ = handle_error!(self, res, *their_node_id, channel_state_lock); + } } fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e99e96097..f3b3302ea 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -180,8 +180,8 @@ pub fn create_funding_transaction(node: &Node, expected_chan_value: u64, expecte pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> Transaction { node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap(); - node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap(); - node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap(); + node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())); + node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())); let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, channel_value, 42); @@ -193,7 +193,7 @@ pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, c added_monitors.clear(); } - node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())).unwrap(); + node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())); { let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); @@ -201,7 +201,7 @@ pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, c added_monitors.clear(); } - node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())).unwrap(); + node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())); { let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); @@ -224,7 +224,7 @@ pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, c pub fn create_chan_between_nodes_with_value_confirm_first(node_recv: &Node, node_conf: &Node, tx: &Transaction) { confirm_transaction(&node_conf.block_notifier, &node_conf.chain_monitor, &tx, tx.version); - node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id())).unwrap(); + node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id())); } pub fn create_chan_between_nodes_with_value_confirm_second(node_recv: &Node, node_conf: &Node) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) { 
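All of these helpers now follow the same delivery pattern: send a message with a bare handler call, then pull any follow-up out of the pending-event queue. A condensed sketch of the idiom, using the Node harness and the get_event_msg! macro defined in this file (illustrative only, not an additional hunk):

	let open_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id());
	node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), LocalFeatures::new(), &open_msg);
	// A failure no longer returns Err: it is logged and, unless the action is
	// IgnoreError, surfaces as MessageSendEvent::HandleError in
	// node_b.node.get_and_clear_pending_msg_events().
	let accept_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id());
	node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), LocalFeatures::new(), &accept_msg);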
@@ -260,9 +260,9 @@ pub fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, chan
 }
 
 pub fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
-	node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap();
+	node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0);
 	let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id());
-	node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap();
+	node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1);
 
 	let events_7 = node_b.node.get_and_clear_pending_msg_events();
 	assert_eq!(events_7.len(), 1);
@@ -273,7 +273,7 @@ pub fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_f
 		_ => panic!("Unexpected event"),
 	};
 
-	node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap();
+	node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs);
 	let events_8 = node_a.node.get_and_clear_pending_msg_events();
 	assert_eq!(events_8.len(), 1);
 	let as_update = match events_8[0] {
@@ -344,15 +344,24 @@ macro_rules! get_closing_signed_broadcast {
 }
 
 macro_rules! check_closed_broadcast {
-	($node: expr) => {{
+	($node: expr, $with_error_msg: expr) => {{
 		let events = $node.node.get_and_clear_pending_msg_events();
-		assert_eq!(events.len(), 1);
+		assert_eq!(events.len(), if $with_error_msg { 2 } else { 1 });
 		match events[0] {
 			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
 				assert_eq!(msg.contents.flags & 2, 2);
 			},
 			_ => panic!("Unexpected event"),
 		}
+		if $with_error_msg {
+			match events[1] {
+				MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+					// TODO: Check node_id
+					Some(msg.clone())
+				},
+				_ => panic!("Unexpected event"),
+			}
+		} else { None }
 	}}
 }
 
@@ -362,7 +371,7 @@ pub fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8
 	let (tx_a, tx_b);
 
 	node_a.close_channel(channel_id).unwrap();
-	node_b.handle_shutdown(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id())).unwrap();
+	node_b.handle_shutdown(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id()));
 
 	let events_1 = node_b.get_and_clear_pending_msg_events();
 	assert!(events_1.len() >= 1);
@@ -387,15 +396,15 @@ pub fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8
 		})
 	};
 
-	node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b).unwrap();
+	node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b);
 	let (as_update, bs_update) = if close_inbound_first {
 		assert!(node_a.get_and_clear_pending_msg_events().is_empty());
-		node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
+		node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap());
 		assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
 		tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
 		let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
 
-		node_b.handle_closing_signed(&node_a.get_our_node_id(),
&closing_signed_a.unwrap()).unwrap(); + node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()); let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); assert!(none_b.is_none()); assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); @@ -404,12 +413,12 @@ pub fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8 } else { let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id()); - node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a).unwrap(); + node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a); assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); - node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap(); + node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()); let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); assert!(none_a.is_none()); assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); @@ -465,7 +474,7 @@ macro_rules! commitment_signed_dance { { check_added_monitors!($node_a, 0); assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); - $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); + $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed); check_added_monitors!($node_a, 1); commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false); } @@ -475,10 +484,10 @@ macro_rules! commitment_signed_dance { let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id()); check_added_monitors!($node_b, 0); assert!($node_b.node.get_and_clear_pending_msg_events().is_empty()); - $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack); assert!($node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!($node_b, 1); - $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed).unwrap(); + $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { let events = $node_b.node.get_and_clear_pending_msg_events(); assert!(events.len() <= 2); @@ -502,7 +511,7 @@ macro_rules! commitment_signed_dance { { check_added_monitors!($node_a, 0); assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); - $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); + $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed); check_added_monitors!($node_a, 1); let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); assert!(extra_msg_option.is_none()); @@ -512,7 +521,7 @@ macro_rules! 
commitment_signed_dance { ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => { { let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); - $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors!($node_a, 1); extra_msg_option } @@ -605,7 +614,7 @@ pub fn send_along_route_with_hash(origin_node: &Node, route: Route, expected_rou for (idx, &node) in expected_route.iter().enumerate() { assert_eq!(node.node.get_our_node_id(), payment_event.node_id); - node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(node, 0); commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); @@ -669,7 +678,7 @@ pub fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], s macro_rules! last_update_fulfill_dance { ($node: expr, $prev_node: expr) => { { - $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap(); + $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); check_added_monitors!($node, 0); assert!($node.node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false); @@ -679,7 +688,7 @@ pub fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], s macro_rules! mid_update_fulfill_dance { ($node: expr, $prev_node: expr, $new_msgs: expr) => { { - $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap(); + $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); check_added_monitors!($node, 1); let new_next_msgs = if $new_msgs { get_next_msgs!($node) @@ -763,7 +772,7 @@ pub fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], sk macro_rules! 
update_fail_dance { ($node: expr, $prev_node: expr, $last_node: expr) => { { - $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap(); + $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node); if skip_last && $last_node { expect_pending_htlcs_forwardable!($node); @@ -1106,7 +1115,7 @@ pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, let mut resp_1 = Vec::new(); for msg in reestablish_1 { - node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap(); + node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg); resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a)); } if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 { @@ -1117,7 +1126,7 @@ pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, let mut resp_2 = Vec::new(); for msg in reestablish_2 { - node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap(); + node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg); resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b)); } if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 { @@ -1132,7 +1141,7 @@ pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, for chan_msgs in resp_1.drain(..) { if send_funding_locked.0 { - node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap(); + node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()); let announcement_event = node_a.node.get_and_clear_pending_msg_events(); if !announcement_event.is_empty() { assert_eq!(announcement_event.len(), 1); @@ -1145,7 +1154,7 @@ pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, } if pending_raa.0 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); - node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap(); + node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_a, 1); } else { @@ -1162,23 +1171,23 @@ pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0); assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); for update_add in commitment_update.update_add_htlcs { - node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap(); + node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add); } for update_fulfill in commitment_update.update_fulfill_htlcs { - node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap(); + node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill); } for update_fail in commitment_update.update_fail_htlcs { - node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap(); + node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail); } if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false); } else { - 
node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed); check_added_monitors!(node_a, 1); let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b.node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes - node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_b, 1); } @@ -1189,7 +1198,7 @@ pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, for chan_msgs in resp_2.drain(..) { if send_funding_locked.1 { - node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap(); + node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()); let announcement_event = node_b.node.get_and_clear_pending_msg_events(); if !announcement_event.is_empty() { assert_eq!(announcement_event.len(), 1); @@ -1202,7 +1211,7 @@ pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, } if pending_raa.1 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); - node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap(); + node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_b, 1); } else { @@ -1217,23 +1226,23 @@ pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0); assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); for update_add in commitment_update.update_add_htlcs { - node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap(); + node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add); } for update_fulfill in commitment_update.update_fulfill_htlcs { - node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap(); + node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill); } for update_fail in commitment_update.update_fail_htlcs { - node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap(); + node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail); } if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false); } else { - node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed); check_added_monitors!(node_b, 1); let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a.node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes - node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack); 
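			// (Same pattern as every delivery above: nothing to unwrap; success is
			//  confirmed by the empty message-event queue and the monitor count.)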
 				assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
 				check_added_monitors!(node_a, 1);
 			}
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index e9951c206..af656320e 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -68,14 +68,18 @@ fn test_insane_channel_opens() {
 
 	// Test helper that asserts we get the correct error string given a mutator
 	// that supposedly makes the channel open message insane
-	let insane_open_helper = |expected_error_str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
-		match nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &message_mutator(open_channel_message.clone())) {
-			Err(msgs::LightningError{ err: error_str, action: msgs::ErrorAction::SendErrorMessage {..}}) => {
-				assert_eq!(error_str, expected_error_str, "unexpected LightningError string (expected `{}`, actual `{}`)", expected_error_str, error_str)
-			},
-			Err(msgs::LightningError{..}) => {panic!("unexpected LightningError action")},
-			_ => panic!("insane OpenChannel message was somehow Ok"),
-		}
+	let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
+		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &message_mutator(open_channel_message.clone()));
+		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(msg_events.len(), 1);
+		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
+			match action {
+				&ErrorAction::SendErrorMessage { .. } => {
+					nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), expected_error_str.to_string(), 1);
+				},
+				_ => panic!("unexpected event!"),
+			}
+		} else { assert!(false); }
 	};
 
 	use ln::channel::MAX_FUNDING_SATOSHIS;
@@ -140,7 +144,7 @@ fn test_async_inbound_update_fee() {
 		_ => panic!("Unexpected event"),
 	};
 
-	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
+	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
 
 	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
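	// (The numbered comments below, (2), (3) and so on, track which
	//  commitment_signed and revoke_and_ack each delivery corresponds to,
	//  since both sides now have updates in flight at once.)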
let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); @@ -156,19 +160,19 @@ fn test_async_inbound_update_fee() { assert_eq!(payment_event.msgs.len(), 1); // ...now when the messages get delivered everyone should be happy - nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); // deliver(1), generate (3): - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); // deliver (2) + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2) let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(bs_update.update_add_htlcs.is_empty()); // (4) assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4) @@ -177,7 +181,7 @@ fn test_async_inbound_update_fee() { assert!(bs_update.update_fee.is_none()); // (4) check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); // deliver (3) + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3) let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); assert!(as_update.update_add_htlcs.is_empty()); // (5) assert!(as_update.update_fulfill_htlcs.is_empty()); // (5) @@ -186,16 +190,16 @@ fn test_async_inbound_update_fee() { assert!(as_update.update_fee.is_none()); // (5) check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap(); // deliver (4) + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4) let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // only (6) so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed).unwrap(); // deliver (5) + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5) let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap(); + 
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke); check_added_monitors!(nodes[0], 1); let events_2 = nodes[0].node.get_and_clear_pending_events(); @@ -205,7 +209,7 @@ fn test_async_inbound_update_fee() { _ => panic!("Unexpected event"), } - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap(); // deliver (6) + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6) check_added_monitors!(nodes[1], 1); } @@ -233,7 +237,7 @@ fn test_update_fee_unordered_raa() { _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); @@ -249,13 +253,13 @@ fn test_update_fee_unordered_raa() { assert_eq!(payment_event.msgs.len(), 1); // ...now when the messages get delivered everyone should be happy - nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2) + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2) check_added_monitors!(nodes[1], 1); // We can't continue, sadly, because our (1) now has a bogus signature @@ -301,8 +305,8 @@ fn test_multi_flight_update_fee() { }; // Deliver first update_fee/commitment_signed pair, generating (1) and (2): - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap(); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1); let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -318,14 +322,14 @@ fn test_multi_flight_update_fee() { feerate_per_kw: (initial_feerate + 30) as u32, }; - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap(); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2); update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32; // Deliver (3) - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap(); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2); // Deliver (1), generating (3) and (4) - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); + 
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 check_added_monitors!(nodes[0], 1);
 assert!(as_second_update.update_add_htlcs.is_empty());
@@ -337,30 +341,30 @@ fn test_multi_flight_update_fee() {
 assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
 // Deliver (2) commitment_signed
- nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
 let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 check_added_monitors!(nodes[0], 1);
 // No commitment_signed so get_event_msg's assert(len == 1) passes
- nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap();
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 check_added_monitors!(nodes[1], 1);
 // Deliver (4)
- nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
 let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 check_added_monitors!(nodes[1], 1);
- nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap();
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 check_added_monitors!(nodes[0], 1);
- nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
 let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 // No commitment_signed so get_event_msg's assert(len == 1) passes
 check_added_monitors!(nodes[0], 1);
- nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap();
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 check_added_monitors!(nodes[1], 1);
 }
@@ -383,22 +387,22 @@ fn test_update_fee_vanilla() {
 },
 _ => panic!("Unexpected event"),
 };
- nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
- nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
 let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 check_added_monitors!(nodes[1], 1);
- nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 check_added_monitors!(nodes[0], 1);
- nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
 let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 // No commitment_signed so get_event_msg's assert(len == 1) passes
 check_added_monitors!(nodes[0], 1);
- nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 check_added_monitors!(nodes[1], 1);
 }
@@ -415,7 +419,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 check_added_monitors!(nodes[0], 1);
 let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
- nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap()).unwrap();
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());
 commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
@@ -440,17 +444,13 @@ fn test_update_fee_that_funder_cannot_afford() {
 let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
- nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap()).unwrap();
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap());
 //While producing the commitment_signed response after handling a received update_fee request the
 //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
 //Should produce an error.
- let err = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed).unwrap_err(); - - assert!(match err.err { - "Funding remote cannot afford proposed new fee" => true, - _ => false, - }); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed); + nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1); //clear the message we could not handle nodes[1].node.get_and_clear_pending_msg_events(); @@ -477,8 +477,8 @@ fn test_update_fee_with_fundee_update_add_htlc() { }, _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed); let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -497,15 +497,15 @@ fn test_update_fee_with_fundee_update_add_htlc() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // node[1] has nothing to do - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed); let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg); check_added_monitors!(nodes[1], 1); // AwaitingRemoteRevoke ends here @@ -516,21 +516,21 @@ fn test_update_fee_with_fundee_update_add_htlc() { assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0); assert_eq!(commitment_update.update_fee.is_none(), true); - nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap(); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed); check_added_monitors!(nodes[0], 1); let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap(); + 
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed); check_added_monitors!(nodes[1], 1); let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -583,15 +583,15 @@ fn test_update_fee() { }, _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); // Generate (2) and (3): - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed); let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); // Deliver (2): - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); @@ -607,26 +607,26 @@ fn test_update_fee() { _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed); check_added_monitors!(nodes[1], 1); // ... 
creating (5) let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes // Handle (3), creating (6): - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0); check_added_monitors!(nodes[0], 1); let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes // Deliver (5): - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); // Deliver (6), creating (7): - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0); let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(commitment_update.update_add_htlcs.is_empty()); assert!(commitment_update.update_fulfill_htlcs.is_empty()); @@ -636,12 +636,12 @@ fn test_update_fee() { check_added_monitors!(nodes[1], 1); // Deliver (7) - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed); check_added_monitors!(nodes[0], 1); let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -661,14 +661,14 @@ fn pre_funding_lock_shutdown_test() { nodes[0].node.close_channel(&OutPoint::new(tx.txid(), 0).to_channel_id()).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown); let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + 
nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); assert!(node_0_none.is_none()); @@ -689,9 +689,9 @@ fn updates_shutdown_wait() { nodes[0].node.close_channel(&chan_1.2).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -710,7 +710,7 @@ fn updates_shutdown_wait() { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); check_added_monitors!(nodes[1], 1); let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); @@ -720,7 +720,7 @@ fn updates_shutdown_wait() { assert!(updates_2.update_fail_malformed_htlcs.is_empty()); assert!(updates_2.update_fee.is_none()); assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_events(); @@ -733,9 +733,9 @@ fn updates_shutdown_wait() { } let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); assert!(node_0_none.is_none()); @@ -768,13 +768,13 @@ fn htlc_fail_async_shutdown() { nodes[1].node.close_channel(&chan_1.2).unwrap(); let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown); let node_0_shutdown = 
get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -784,7 +784,7 @@ fn htlc_fail_async_shutdown() { assert!(updates_2.update_fail_malformed_htlcs.is_empty()); assert!(updates_2.update_fee.is_none()); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_events(); @@ -814,9 +814,9 @@ fn htlc_fail_async_shutdown() { } assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); assert!(node_0_none.is_none()); @@ -841,10 +841,10 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[1].node.close_channel(&chan_1.2).unwrap(); let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); if recv_count > 0 { - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); if recv_count > 1 { - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); } } @@ -856,21 +856,21 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish).unwrap(); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish); let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); 
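// On reconnect, nodes[1] must re-send a shutdown identical to the one it sent before the disconnect, which the assertion below checks.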
assert!(node_1_shutdown == node_1_2nd_shutdown); - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish).unwrap(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish); let node_0_2nd_shutdown = if recv_count > 0 { let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown); node_0_2nd_shutdown } else { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown); get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()) }; - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -883,7 +883,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); check_added_monitors!(nodes[1], 1); let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); @@ -893,7 +893,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert!(updates_2.update_fail_malformed_htlcs.is_empty()); assert!(updates_2.update_fee.is_none()); assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_events(); @@ -907,7 +907,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); if recv_count > 0 { - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); assert!(node_1_closing_signed.is_some()); } @@ -922,24 +922,24 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // If all closing_signeds weren't delivered we can just resume where we left off... 
let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish).unwrap(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); assert!(node_0_2nd_shutdown == node_0_3rd_shutdown); - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish).unwrap(); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish); let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); assert!(node_1_3rd_shutdown == node_1_2nd_shutdown); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown).unwrap(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown); let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); assert!(node_0_closing_signed == node_0_2nd_closing_signed); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed).unwrap(); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed); let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); assert!(node_0_none.is_none()); } else { @@ -952,17 +952,23 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // transaction. assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - if let Err(msgs::LightningError{action: msgs::ErrorAction::SendErrorMessage{msg}, ..}) = - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) { - nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg); - let msgs::ErrorMessage {ref channel_id, ..} = msg; - assert_eq!(*channel_id, chan_1.2); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish); + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + if let MessageSendEvent::HandleError { ref action, .. 
} = msg_events[0] { + match action { + &ErrorAction::SendErrorMessage { ref msg } => { + nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg); + assert_eq!(msg.channel_id, chan_1.2); + }, + _ => panic!("Unexpected event!"), + } } else { panic!("Needed SendErrorMessage close"); } // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and // checks it, but in this case nodes[0] didn't ever get a chance to receive a // closing_signed so we do it ourselves - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); } assert!(nodes[0].node.list_channels().is_empty()); @@ -1077,7 +1083,11 @@ fn fake_network_test() { let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0; let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0; + route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0); + nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1); //TODO: Test that routes work again here as we've been notified that the channel is full @@ -1124,6 +1134,8 @@ fn holding_cell_htlc_counting() { if let APIError::ChannelUnavailable { err } = nodes[1].node.send_payment(route, payment_hash_1).unwrap_err() { assert_eq!(err, "Cannot push more than their max accepted HTLCs"); } else { panic!("Unexpected event"); } + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1); // This should also be true if we try to forward a payment. let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap(); @@ -1136,7 +1148,7 @@ fn holding_cell_htlc_counting() { let payment_event = SendEvent::from_event(events.pop().unwrap()); assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward. 
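The do_test_shutdown_rebroadcast hunk above shows the new error path end to end: a call that used to return Err(LightningError { action: ErrorAction::SendErrorMessage { .. }, .. }) now returns nothing, and the caller instead drains the node's pending message events and matches on a HandleError event. A minimal sketch of that check as a reusable test helper, assuming the Node, MessageSendEvent and ErrorAction types already used throughout these tests (the helper itself is illustrative, not part of this patch):

    fn assert_error_message_sent(node: &Node, expected_channel_id: [u8; 32]) {
        // Exactly one HandleError event should have been queued by the failed handler call.
        let msg_events = node.node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 1);
        if let MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, .. } = msg_events[0] {
            // The outbound error_message should name the channel being closed.
            assert_eq!(msg.channel_id, expected_channel_id);
        } else { panic!("Expected a SendErrorMessage close"); }
    }

With such a helper, the pattern above reduces to delivering the stale channel_reestablish and then calling assert_error_message_sent(&nodes[1], chan_1.2).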
@@ -1145,7 +1157,7 @@ fn holding_cell_htlc_counting() { check_added_monitors!(nodes[1], 1); let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1168,35 +1180,35 @@ fn holding_cell_htlc_counting() { } // Now forward all the pending HTLCs and claim them back - nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]).unwrap(); - nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg).unwrap(); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]); + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg); check_added_monitors!(nodes[2], 1); let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed); check_added_monitors!(nodes[1], 1); let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); for ref update in as_updates.update_add_htlcs.iter() { - nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update).unwrap(); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update); } - nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed).unwrap(); + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed); check_added_monitors!(nodes[2], 1); - nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[2], 1); let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed); check_added_monitors!(nodes[1], 1); let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa).unwrap(); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa); 
check_added_monitors!(nodes[2], 1); expect_pending_htlcs_forwardable!(nodes[2]); @@ -1316,7 +1328,6 @@ fn test_duplicate_htlc_different_direction_onchain() { } fn do_channel_reserve_test(test_recv: bool) { - use ln::msgs::LightningError; let mut nodes = create_network(3, &[None, None, None]); let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001, LocalFeatures::new(), LocalFeatures::new()); @@ -1360,6 +1371,8 @@ fn do_channel_reserve_test(test_recv: bool) { APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"), _ => panic!("Unknown error variants"), } + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1); } let mut htlc_id = 0; @@ -1396,6 +1409,8 @@ fn do_channel_reserve_test(test_recv: bool) { APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"), _ => panic!("Unknown error variants"), } + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over their reserve value".to_string(), 1); } // adding pending output @@ -1411,7 +1426,7 @@ fn do_channel_reserve_test(test_recv: bool) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]); // channel reserve test with htlc pending output > 0 let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat; @@ -1421,6 +1436,8 @@ fn do_channel_reserve_test(test_recv: bool) { APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"), _ => panic!("Unknown error variants"), } + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over their reserve value".to_string(), 2); } { @@ -1450,14 +1467,12 @@ fn do_channel_reserve_test(test_recv: bool) { }; if test_recv { - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap(); - match err { - LightningError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"), - } + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); // If we send a garbage message, the channel should get closed, making the rest of this test case fail. 
assert_eq!(nodes[1].node.list_channels().len(), 1); assert_eq!(nodes[1].node.list_channels().len(), 1); - check_closed_broadcast!(nodes[1]); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data, "Remote HTLC add would put them over their reserve value"); return; } } @@ -1485,6 +1500,8 @@ fn do_channel_reserve_test(test_recv: bool) { APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"), _ => panic!("Unknown error variants"), } + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over their reserve value".to_string(), 3); } let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22); @@ -1495,27 +1512,27 @@ fn do_channel_reserve_test(test_recv: bool) { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // flush the pending htlc - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack); check_added_monitors!(nodes[0], 1); let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); let ref payment_event_11 = expect_forward!(nodes[1]); - nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap(); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[2]); @@ -1523,15 +1540,15 @@ fn do_channel_reserve_test(test_recv: bool) { // flush the htlcs in the holding cell assert_eq!(commitment_update_2.update_add_htlcs.len(), 2); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap(); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), 
&commitment_update_2.update_add_htlcs[1]);
 commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
 expect_pending_htlcs_forwardable!(nodes[1]);
 let ref payment_event_3 = expect_forward!(nodes[1]);
 assert_eq!(payment_event_3.msgs.len(), 2);
- nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
- nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
 commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
 expect_pending_htlcs_forwardable!(nodes[2]);
@@ -1627,27 +1644,27 @@ fn channel_reserve_in_flight_removes() {
 check_added_monitors!(nodes[1], 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]).unwrap();
- nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed).unwrap();
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
 check_added_monitors!(nodes[0], 1);
 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 expect_payment_sent!(nodes[0], payment_preimage_1);
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]).unwrap();
- nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg).unwrap();
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
 check_added_monitors!(nodes[1], 1);
 // B is already AwaitingRAA, so can't generate a CS here
 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
- nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 check_added_monitors!(nodes[1], 1);
 let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
 check_added_monitors!(nodes[0], 1);
 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
- nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
 check_added_monitors!(nodes[1], 1);
 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
@@ -1656,13 +1673,13 @@ fn channel_reserve_in_flight_removes() {
 // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
 // can no longer broadcast a commitment transaction with it and B has the preimage so can go
 // on-chain as necessary).
- nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]).unwrap(); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap(); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); expect_payment_sent!(nodes[0], payment_preimage_2); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1671,7 +1688,7 @@ fn channel_reserve_in_flight_removes() { // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't // resolve the second HTLC from A's point of view. - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -1687,29 +1704,29 @@ fn channel_reserve_in_flight_removes() { SendEvent::from_event(events.remove(0)) }; - nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]).unwrap(); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg).unwrap(); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // Now just resolve all the outstanding messages/HTLCs for completeness... 
- nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); expect_pending_htlcs_forwardable!(nodes[0]); @@ -2193,7 +2210,7 @@ fn test_htlc_on_chain_success() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); - check_closed_broadcast!(nodes[2]); + check_closed_broadcast!(nodes[2], false); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 4 (2*2 * HTLC-Success tx) assert_eq!(node_txn.len(), 7); assert_eq!(node_txn[0], node_txn[3]); @@ -2280,7 +2297,7 @@ fn test_htlc_on_chain_success() { let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn(); check_spends!(commitment_tx[0], chan_1.3.clone()); nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx + 2*HTLC-Success), ChannelMonitor : 1 (HTLC-Success) * 2 (block-rescan) assert_eq!(node_txn.len(), 5); assert_eq!(node_txn[0], node_txn[4]); @@ -2299,7 +2316,7 @@ fn test_htlc_on_chain_success() { // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); let mut first_claimed = false; @@ -2363,7 +2380,7 @@ fn test_htlc_on_chain_timeout() { _ => panic!("Unexpected event"), }; nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); - check_closed_broadcast!(nodes[2]); + check_closed_broadcast!(nodes[2], false); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 
ChannelManager : 1 (commitment tx) assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan_2.3.clone()); @@ -2396,7 +2413,7 @@ fn test_htlc_on_chain_timeout() { nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![timeout_tx]}, 1); connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash()); check_added_monitors!(nodes[1], 0); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); @@ -2420,7 +2437,7 @@ fn test_htlc_on_chain_timeout() { check_spends!(commitment_tx[0], chan_1.3.clone()); nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 2 (timeout tx) * 2 block-rescan assert_eq!(node_txn.len(), 4); assert_eq!(node_txn[0], node_txn[3]); @@ -2455,7 +2472,7 @@ fn test_simple_commitment_revoked_fail_backward() { nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash()); check_added_monitors!(nodes[1], 0); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); @@ -2469,7 +2486,7 @@ fn test_simple_commitment_revoked_fail_backward() { assert!(update_fail_malformed_htlcs.is_empty()); assert_eq!(nodes[0].node.get_our_node_id(), *node_id); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -2537,7 +2554,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert!(updates.update_fail_malformed_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); // Drop the last RAA from 3 -> 2 @@ -2550,12 +2567,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert!(updates.update_fail_malformed_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); // Note that nodes[1] is in AwaitingRAA, so won't send a CS let as_raa = get_event_msg!(nodes[1], 
MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[2], 1); assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash)); @@ -2567,14 +2584,14 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert!(updates.update_fail_malformed_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); // At this point first_payment_hash has dropped out of the latest two commitment // transactions that nodes[1] is tracking... - nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[2], 1); // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting @@ -2587,7 +2604,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use check_added_monitors!(nodes[1], 0); if deliver_bs_raa { - nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa); // One monitor for the new revocation preimage, no second one as we won't generate a new // commitment transaction for nodes[0] until process_pending_htlc_forwards().
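
The dropped .unwrap() calls above are the caller-side half of this change: ChannelMessageHandler methods no longer return a Result, so tests that previously matched on a returned LightningError now drain the node's pending message events instead. A minimal sketch of the new convention, assuming a hypothetical expect_error_message helper (not part of this patch) and the functional-test harness Node type:

// Hypothetical helper, not in this patch: handlers now queue a
// MessageSendEvent::HandleError instead of returning Err(LightningError),
// so tests assert on the queued event rather than on a return value.
fn expect_error_message(node: &Node, expected_err: &str) {
    let msg_events = node.node.get_and_clear_pending_msg_events();
    assert_eq!(msg_events.len(), 1);
    if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
        match action {
            &ErrorAction::SendErrorMessage { ref msg } => assert_eq!(msg.data, expected_err),
            _ => panic!("Unexpected ErrorAction"),
        }
    } else { panic!("Expected a HandleError event"); }
}
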
check_added_monitors!(nodes[1], 1); @@ -2651,9 +2668,9 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert!(update_fail_malformed_htlcs.is_empty()); assert_eq!(nodes[0].node.get_our_node_id(), *node_id); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap(); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]).unwrap(); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); @@ -2721,14 +2738,14 @@ fn test_htlc_ignore_latest_remote_commitment() { route_payment(&nodes[0], &[&nodes[1]], 10000000); nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 2); let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}, 1); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); // Duplicate the block_connected call since this may happen due to other listeners // registering new transactions @@ -2755,7 +2772,7 @@ fn test_force_close_fail_back() { SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -2766,8 +2783,8 @@ fn test_force_close_fail_back() { assert_eq!(payment_event.msgs.len(), 1); check_added_monitors!(nodes[1], 1); - nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[2], 1); let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -2776,7 +2793,7 @@ fn test_force_close_fail_back() { // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). 
nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id); - check_closed_broadcast!(nodes[2]); + check_closed_broadcast!(nodes[2], false); let tx = { let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't @@ -2790,7 +2807,7 @@ fn test_force_close_fail_back() { nodes[1].block_notifier.block_connected_checked(&header, 1, &[&tx], &[1]); // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. { @@ -2832,7 +2849,7 @@ fn test_unconf_chan() { nodes[0].node.block_disconnected(&headers.pop().unwrap(), height); height -= 1; } - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); let channel_state = nodes[0].node.channel_state.lock().unwrap(); assert_eq!(channel_state.by_id.len(), 0); assert_eq!(channel_state.short_to_id.len(), 0); @@ -2918,25 +2935,25 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { if messages_delivered < 2 { // Drop the payment_event messages, and let them get re-generated in reconnect_nodes! } else { - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); if messages_delivered >= 3 { - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); if messages_delivered >= 4 { - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); if messages_delivered >= 5 { - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); if messages_delivered >= 6 { - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); } @@ -3007,7 +3024,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { }; if messages_delivered >= 1 { - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap(); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc); let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 1); @@ -3019,23 +3036,23 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { } if messages_delivered >= 2 { - 
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed); check_added_monitors!(nodes[0], 1); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); if messages_delivered >= 3 { - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); if messages_delivered >= 4 { - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); if messages_delivered >= 5 { - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); } @@ -3141,8 +3158,8 @@ fn test_funding_peer_disconnect() { reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &funding_locked).unwrap(); - nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs).unwrap(); + nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &funding_locked); + nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs); let events_3 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 2); let as_announcement_sigs = match events_3[0] { @@ -3159,7 +3176,7 @@ fn test_funding_peer_disconnect() { _ => panic!("Unexpected event"), }; - nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs).unwrap(); + nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); let (_, bs_update) = match events_4[0] { @@ -3215,7 +3232,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(update_fail_malformed_htlcs.is_empty()); assert!(update_fee.is_none()); - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -3225,7 +3242,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed); let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so 
get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -3243,9 +3260,9 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); assert!(as_resp.0.is_none()); @@ -3261,13 +3278,13 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); assert!(as_resp.2.as_ref().unwrap().update_fee.is_none()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()); let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(bs_second_commitment_signed.update_add_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty()); @@ -3276,7 +3293,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(bs_second_commitment_signed.update_fee.is_none()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); assert!(as_commitment_signed.update_add_htlcs.is_empty()); assert!(as_commitment_signed.update_fulfill_htlcs.is_empty()); @@ -3285,17 +3302,17 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_commitment_signed.update_fee.is_none()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - 
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed); let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); @@ -3310,7 +3327,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { _ => panic!("Unexpected event"), } - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); @@ -3330,7 +3347,7 @@ fn test_invalid_channel_announcement() { let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap(); let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap(); - let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } ); + nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } ); let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().inner.funding_key); let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().inner.funding_key); @@ -3438,9 +3455,9 @@ fn test_no_txn_manager_serialize_deserialize() { nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); @@ -3571,9 +3588,17 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id()); let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id()); - if let Err(msgs::LightningError { action: msgs::ErrorAction::SendErrorMessage { msg }, .. 
}) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) { - assert_eq!(msg.channel_id, channel_id); - } else { panic!("Unexpected result"); } + nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish); + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] { + match action { + &ErrorAction::SendErrorMessage { ref msg } => { + assert_eq!(msg.channel_id, channel_id); + }, + _ => panic!("Unexpected event!"), + } + } } macro_rules! check_spendable_outputs { @@ -3695,7 +3720,7 @@ fn test_claim_sizeable_push_msat() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, LocalFeatures::new(), LocalFeatures::new()); nodes[1].node.force_close_channel(&chan.2); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan.3.clone()); @@ -3716,7 +3741,7 @@ fn test_claim_on_remote_sizeable_push_msat() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, LocalFeatures::new(), LocalFeatures::new()); nodes[0].node.force_close_channel(&chan.2); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); @@ -3725,7 +3750,7 @@ fn test_claim_on_remote_sizeable_push_msat() { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, 0); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); let spend_txn = check_spendable_outputs!(nodes[1], 1); assert_eq!(spend_txn.len(), 2); assert_eq!(spend_txn[0], spend_txn[1]); @@ -3748,7 +3773,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() { claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000); let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); let spend_txn = check_spendable_outputs!(nodes[1], 1); @@ -3819,7 +3844,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 3); @@ -3850,7 +3875,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; // A will generate HTLC-Timeout from revoked commitment tx 
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(revoked_htlc_txn.len(), 3); @@ -3862,7 +3887,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { // B will generate justice tx from A's revoked commitment/HTLC tx nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 5); @@ -3894,7 +3919,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; // B will generate HTLC-Success from revoked commitment tx nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(revoked_htlc_txn.len(), 3); @@ -3905,7 +3930,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { // A will generate justice tx from B's revoked commitment/HTLC tx nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 4); @@ -3955,7 +3980,7 @@ fn test_onchain_to_onchain_claim() { assert!(updates.update_fail_malformed_htlcs.is_empty()); nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); - check_closed_broadcast!(nodes[2]); + check_closed_broadcast!(nodes[2], false); let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx) assert_eq!(c_txn.len(), 4); @@ -4015,7 +4040,7 @@ fn test_onchain_to_onchain_claim() { assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment assert_eq!(b_txn[2].lock_time, 0); // Success tx - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); } #[test] @@ -4037,7 +4062,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); let htlc_timeout_tx; { // Extract one of the two HTLC-Timeout transactions @@ -4094,7 +4119,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]); 
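
Every check_closed_broadcast! call site in this patch gains a boolean second argument. The macro's definition lives in the shared test utilities and is not shown in this diff; judging from the call sites (the receiver-side BOLT 2 tests below do let err_msg = check_closed_broadcast!(nodes[1], true).unwrap() and then assert on err_msg.data), it plausibly now reads roughly as the sketch below. The flag selects whether a HandleError event carrying the outbound error message is expected alongside the BroadcastChannelUpdate; the exact event count and the returned Option are assumptions.

macro_rules! check_closed_broadcast {
    ($node: expr, $with_error_msg: expr) => {{
        let events = $node.node.get_and_clear_pending_msg_events();
        // Closing a channel always broadcasts a channel update marking it closed...
        assert_eq!(events.len(), if $with_error_msg { 2 } else { 1 });
        match events[0] {
            MessageSendEvent::BroadcastChannelUpdate { .. } => {},
            _ => panic!("Unexpected event"),
        }
        if $with_error_msg {
            // ...and, when requested, also queues the error message we sent to the
            // peer, returned here so callers can assert on its contents.
            match events[1] {
                MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { ref msg }, .. } => Some(msg.clone()),
                _ => panic!("Unexpected event"),
            }
        } else { None }
    }}
}
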
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); { commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true); @@ -4124,7 +4149,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { assert!(updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); let events = nodes[0].node.get_and_clear_pending_events(); @@ -4249,10 +4274,10 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno check_added_monitors!(nodes[4], 1); let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id()); - nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]).unwrap(); - nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]).unwrap(); - nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]).unwrap(); - nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]).unwrap(); + nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]); + nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]); + nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]); + nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]); commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false); // Fail 3rd below-dust and 7th above-dust HTLCs @@ -4263,8 +4288,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno check_added_monitors!(nodes[5], 1); let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id()); - nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]).unwrap(); - nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]).unwrap(); + nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]); + nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]); commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false); let ds_prev_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn(); @@ -4272,12 +4297,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno expect_pending_htlcs_forwardable!(nodes[3]); check_added_monitors!(nodes[3], 1); let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]).unwrap(); - nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]).unwrap(); - nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]).unwrap(); - 
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]).unwrap(); - nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]).unwrap(); - nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]).unwrap(); + nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]); + nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]); + nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]); + nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]); + nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]); + nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]); if deliver_last_raa { commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false); } else { @@ -4304,7 +4329,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![ds_prev_commitment_tx[0].clone()]}, 1); } connect_blocks(&nodes[2].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash()); - check_closed_broadcast!(nodes[2]); + check_closed_broadcast!(nodes[2], false); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors!(nodes[2], 2); @@ -4334,13 +4359,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 }); &nodes[1] }; - target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); - target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]).unwrap(); - target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]).unwrap(); + target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]); + target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]); if announce_latest { - target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]).unwrap(); + target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]); if *node_id == nodes[0].node.get_our_node_id() { - target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]).unwrap(); + target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]); } } commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true); @@ -4439,7 +4464,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![local_txn[0].clone()] }, 200); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); let node_txn = 
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn[0].input.len(), 1); @@ -4492,7 +4517,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { @@ -4502,10 +4527,10 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0); check_added_monitors!(nodes[1], 1); let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; @@ -4514,7 +4539,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { header.prev_blockhash = header.bitcoin_hash(); } test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); } fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { @@ -4539,7 +4564,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { header.prev_blockhash = header.bitcoin_hash(); } test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); } fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { @@ -4557,18 +4582,18 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap(); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1); check_added_monitors!(nodes[1], 1); let bs_revoke_and_ack = get_event_msg!(nodes[1], 
MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); if check_revoke_no_close { - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors!(nodes[0], 1); } @@ -4579,7 +4604,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no } if !check_revoke_no_close { test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); } else { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -4676,7 +4701,7 @@ fn run_onion_failure_test_with_fail_intercept(_name: &str, test_case: callback_node(); } // 0 => 1 update_add & CS - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_0).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_0); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); let update_1_0 = match test_case { @@ -4703,7 +4728,7 @@ fn run_onion_failure_test_with_fail_intercept(_name: &str, test_case: } // 1 => 2 - nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_1).unwrap(); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_1); commitment_signed_dance!(nodes[2], nodes[1], update_1.commitment_signed, false, true); if test_case == 2 || test_case == 200 { @@ -4725,7 +4750,7 @@ fn run_onion_failure_test_with_fail_intercept(_name: &str, test_case: } // 2 => 1 - nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_msg).unwrap(); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_msg); commitment_signed_dance!(nodes[1], nodes[2], update_2_1.commitment_signed, true); // backward fail on 1 @@ -4742,9 +4767,9 @@ fn run_onion_failure_test_with_fail_intercept(_name: &str, test_case: if test_case == 100 { callback_fail(&mut fail_msg); } - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg); } else { - nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_1_0.update_fail_malformed_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_1_0.update_fail_malformed_htlcs[0]); }; commitment_signed_dance!(nodes[0], nodes[1], update_1_0.commitment_signed, false, true); @@ -5051,7 +5076,7 @@ fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on i let push_msat=10001; nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).unwrap(); let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &node0_to_1_send_open_channel).unwrap(); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &node0_to_1_send_open_channel); //Create a second channel with a channel_id collision assert!(nodes[0].node.create_channel(nodes[0].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_err()); @@ -5121,6 +5146,8 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { } else { 
assert!(false); } + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1); } #[test] @@ -5166,7 +5193,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() } SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); @@ -5182,6 +5209,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() } else { assert!(false); } + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1); } #[test] @@ -5203,6 +5232,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { } else { assert!(false); } + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1); send_payment(&nodes[0], &[&nodes[1]], max_in_flight, max_in_flight); } @@ -5225,14 +5256,10 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1; - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Remote side tried to send less than our minimum HTLC value"); - } else { - assert!(false); - } + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[1]); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data, "Remote side tried to send less than our minimum HTLC value"); } #[test] @@ -5250,16 +5277,11 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); updates.update_add_htlcs[0].amount_msat = 5000000-their_channel_reserve+1; - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Remote HTLC add would put them over their reserve value"); - } else { - assert!(false); - } + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[1]); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data, "Remote HTLC add would put them over their reserve value"); } #[test] @@ -5294,19 +5316,14 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { for i in 0..super::channel::OUR_MAX_HTLCS { 
msg.htlc_id = i as u64; - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); } msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64; - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); - - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Remote tried to push more than our max accepted HTLCs"); - } else { - assert!(false); - } + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[1]); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data, "Remote tried to push more than our max accepted HTLCs"); } #[test] @@ -5320,16 +5337,11 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).their_max_htlc_value_in_flight_msat + 1; - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err,"Remote HTLC add would put them over our max HTLC value"); - } else { - assert!(false); - } + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[1]); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data,"Remote HTLC add would put them over our max HTLC value"); } #[test] @@ -5343,16 +5355,11 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); updates.update_add_htlcs[0].cltv_expiry = 500000000; - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err,"Remote provided CLTV expiry in seconds instead of block height"); - } else { - assert!(false); - } + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[1]); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height"); } #[test] @@ -5367,7 +5374,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { nodes[0].node.send_payment(route, our_payment_hash).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); //Disconnect and Reconnect nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); @@ -5378,27 +5385,23 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { 
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); handle_chan_reestablish_msgs!(nodes[1], nodes[0]); //Resend HTLC - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Remote skipped HTLC ID"); - } else { - assert!(false); - } + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[1]); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data, "Remote skipped HTLC ID"); } #[test] @@ -5413,7 +5416,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { nodes[0].node.send_payment(route, our_payment_hash).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); let update_msg = msgs::UpdateFulfillHTLC{ channel_id: chan.2, @@ -5421,16 +5424,11 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { payment_preimage: our_payment_preimage, }; - let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed"); - } else { - assert!(false); - } + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg); assert!(nodes[0].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[0]); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed"); } #[test] @@ -5445,7 +5443,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { nodes[0].node.send_payment(route, our_payment_hash).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], 
nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); let update_msg = msgs::UpdateFailHTLC{ channel_id: chan.2, @@ -5453,16 +5451,11 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { reason: msgs::OnionErrorPacket { data: Vec::new()}, }; - let err = nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed"); - } else { - assert!(false); - } + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg); assert!(nodes[0].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[0]); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed"); } #[test] @@ -5477,7 +5470,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() nodes[0].node.send_payment(route, our_payment_hash).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); let update_msg = msgs::UpdateFailMalformedHTLC{ channel_id: chan.2, @@ -5486,16 +5479,11 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() failure_code: 0x8000, }; - let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed"); - } else { - assert!(false); - } + nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg); assert!(nodes[0].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[0]); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed"); } #[test] @@ -5528,15 +5516,11 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { update_fulfill_msg.htlc_id = 1; - let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg); - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Remote tried to fulfill/fail an HTLC we couldn't find"); - } else { - assert!(false); - } + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg); assert!(nodes[0].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[0]); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); } #[test] @@ -5569,15 +5553,11 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() { update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]); - let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg); - if let 
Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Remote tried to fulfill HTLC with an incorrect preimage"); - } else { - assert!(false); - } + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg); assert!(nodes[0].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[0]); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Remote tried to fulfill HTLC with an incorrect preimage"); } @@ -5595,7 +5575,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); @@ -5615,15 +5595,11 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag } }; update_msg.failure_code &= !0x8000; - let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { - assert_eq!(err, "Got update_fail_malformed_htlc with BADONION not set"); - } else { - assert!(false); - } + nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg); assert!(nodes[0].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[0]); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); } #[test] @@ -5646,7 +5622,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -5658,7 +5634,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda //Second Hop payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message - nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); @@ -5678,7 +5654,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda } }; - nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0).unwrap(); + nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true); @@ -5726,8 +5702,8 @@ fn 
do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { check_added_monitors!(nodes[1], 1); let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]).unwrap(); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed); check_added_monitors!(nodes[0], 1); // Cache one local commitment tx as latest @@ -5990,28 +5966,28 @@ fn test_upfront_shutdown_script() { let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); // Test that we enforce upfront_scriptpubkey by disconnecting the peer if a different one is provided at closing - if let Err(error) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) { - match error.action { - ErrorAction::SendErrorMessage { msg } => { - assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey"); - }, - _ => { assert!(false); } - } - } else { assert!(false); } + nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); let events = nodes[2].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); + assert_eq!(events.len(), 2); match events[0] { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, _ => panic!("Unexpected event"), } + if let MessageSendEvent::HandleError { ref action, ..
} = events[1] { + match action { + &ErrorAction::SendErrorMessage { ref msg } => { + assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey"); + }, + _ => { assert!(false); } + } + } else { assert!(false); } // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone()); nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign - if let Ok(_) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) {} - else { assert!(false) } + nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -6026,8 +6002,7 @@ fn test_upfront_shutdown_script() { nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap(); let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); - if let Ok(_) = nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown) {} - else { assert!(false) } + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -6041,8 +6016,7 @@ fn test_upfront_shutdown_script() { nodes[1].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap(); let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); - if let Ok(_) = nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown) {} - else { assert!(false) } + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -6056,8 +6030,7 @@ fn test_upfront_shutdown_script() { nodes[1].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap(); let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); - if let Ok(_) = nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown) {} - else { assert!(false) } + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); match events[0] { @@ -6103,12 +6076,13 @@ fn test_user_configurable_csv_delay() { // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel() nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42).unwrap(); - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(),
&get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap(); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())); let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); accept_channel.to_self_delay = 200; - if let Err(error) = nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &accept_channel) { - match error.action { - ErrorAction::SendErrorMessage { msg } => { + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &accept_channel); + if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] { + match action { + &ErrorAction::SendErrorMessage { ref msg } => { assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period"); }, _ => { assert!(false); } @@ -6186,14 +6160,7 @@ fn test_data_loss_protect() { let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); // Check that we update the monitor upon learning of per_commitment_point from B - if let Err(err) = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]) { - match err.action { - ErrorAction::SendErrorMessage { msg } => { - assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting"); - }, - _ => panic!("Unexpected event!"), - } - } else { assert!(false); } + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]); check_added_monitors!(nodes[0], 1); { @@ -6207,26 +6174,36 @@ fn test_data_loss_protect() { assert_eq!(*node_id, nodes[1].node.get_our_node_id()); reestablish_1.push(msg.clone()); } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg { + } else if let MessageSendEvent::HandleError { ref action, .. } = msg { + match action { + &ErrorAction::SendErrorMessage { ref msg } => { + assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting"); + }, + _ => panic!("Unexpected event!"), + } } else { panic!("Unexpected event") } } // Check that we close the channel upon detecting that A has fallen behind - if let Err(err) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]) { - match err.action { - ErrorAction::SendErrorMessage { msg } => { - assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); }, - _ => panic!("Unexpected event!"), - } - } else { assert!(false); } - + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); + assert_eq!(events.len(), 2); match events[0] { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, _ => panic!("Unexpected event"), } + match events[1] { + MessageSendEvent::HandleError { ref action, ..
} => { + match action { + &ErrorAction::SendErrorMessage { ref msg } => { + assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); }, + _ => panic!("Unexpected event!"), + } + }, + _ => panic!("Unexpected event"), + } // Check A is able to claim to_remote output let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); @@ -6273,7 +6250,7 @@ fn test_check_htlc_underpaying() { }; check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_events(); @@ -6327,24 +6304,23 @@ fn test_announce_disable_channels() { assert_eq!(reestablish_2.len(), 3); // Reestablish chan_1 - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); handle_chan_reestablish_msgs!(nodes[1], nodes[0]); // Reestablish chan_2 - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]).unwrap(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]); handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]).unwrap(); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]); handle_chan_reestablish_msgs!(nodes[1], nodes[0]); // Reestablish chan_3 - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]).unwrap(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]); handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]).unwrap(); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]); handle_chan_reestablish_msgs!(nodes[1], nodes[0]); nodes[0].node.timer_chan_freshness_every_min(); - let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 0); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } #[test] @@ -6469,7 +6445,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); - check_closed_broadcast!(nodes[1]); + check_closed_broadcast!(nodes[1], false); let mut received = ::std::usize::MAX; let mut offered = ::std::usize::MAX; @@ -6584,7 +6560,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { assert_eq!(node_txn.len(), 2); //TODO: should be zero when we fix check_spend_remote_htlc node_txn.clear(); } - check_closed_broadcast!(nodes[0]); + 
check_closed_broadcast!(nodes[0], false); } #[test] @@ -6813,7 +6789,7 @@ fn test_bump_txn_sanitize_tracking_maps() { let header_128 = connect_blocks(&nodes[0].block_notifier, 128, 0, false, Default::default()); let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone()] }, 129); - check_closed_broadcast!(nodes[0]); + check_closed_broadcast!(nodes[0], false); let penalty_txn = { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 7); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 277d96d16..ea1953eb8 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -616,42 +616,42 @@ pub enum OptionalField { pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Sync { //Channel init: /// Handle an incoming open_channel message from the given peer. - fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &OpenChannel) -> Result<(), LightningError>; + fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &OpenChannel); /// Handle an incoming accept_channel message from the given peer. - fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &AcceptChannel) -> Result<(), LightningError>; + fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &AcceptChannel); /// Handle an incoming funding_created message from the given peer. - fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &FundingCreated) -> Result<(), LightningError>; + fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &FundingCreated); /// Handle an incoming funding_signed message from the given peer. - fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &FundingSigned) -> Result<(), LightningError>; + fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &FundingSigned); /// Handle an incoming funding_locked message from the given peer. - fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &FundingLocked) -> Result<(), LightningError>; + fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &FundingLocked); // Channel close: /// Handle an incoming shutdown message from the given peer. - fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &Shutdown) -> Result<(), LightningError>; + fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &Shutdown); /// Handle an incoming closing_signed message from the given peer. - fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &ClosingSigned) -> Result<(), LightningError>; + fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &ClosingSigned); // HTLC handling: /// Handle an incoming update_add_htlc message from the given peer. - fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &UpdateAddHTLC) -> Result<(), LightningError>; + fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &UpdateAddHTLC); /// Handle an incoming update_fulfill_htlc message from the given peer.
- fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFulfillHTLC) -> Result<(), LightningError>; + fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFulfillHTLC); /// Handle an incoming update_fail_htlc message from the given peer. - fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailHTLC) -> Result<(), LightningError>; + fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailHTLC); /// Handle an incoming update_fail_malformed_htlc message from the given peer. - fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailMalformedHTLC) -> Result<(), LightningError>; + fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailMalformedHTLC); /// Handle an incoming commitment_signed message from the given peer. - fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &CommitmentSigned) -> Result<(), LightningError>; + fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &CommitmentSigned); /// Handle an incoming revoke_and_ack message from the given peer. - fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &RevokeAndACK) -> Result<(), LightningError>; + fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &RevokeAndACK); /// Handle an incoming update_fee message from the given peer. - fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &UpdateFee) -> Result<(), LightningError>; + fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &UpdateFee); // Channel-to-announce: /// Handle an incoming announcement_signatures message from the given peer. - fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures) -> Result<(), LightningError>; + fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures); // Connection loss/reestablish: /// Indicates a connection to the peer failed/an existing connection was lost. If no connection @@ -663,7 +663,7 @@ pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Syn /// Handle a peer reconnecting, possibly generating channel_reestablish message(s). fn peer_connected(&self, their_node_id: &PublicKey); /// Handle an incoming channel_reestablish message from the given peer. - fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish) -> Result<(), LightningError>; + fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish); // Error: /// Handle an incoming error message from the given peer. 
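With the Result return dropped from every handler above, an implementor surfaces failures through its events::MessageSendEventsProvider side instead: it queues a MessageSendEvent::HandleError carrying an ErrorAction, which PeerManager acts on when it drains pending events. Below is a minimal sketch of that pattern only; Handler, the script_matches flag, and the PublicKey alias are simplified stand-ins for illustration, not the real ChannelManager internals:

    use std::sync::Mutex;

    type PublicKey = [u8; 33]; // stand-in for secp256k1::PublicKey

    pub struct ErrorMessage { pub channel_id: [u8; 32], pub data: String }

    pub enum ErrorAction {
        IgnoreError,
        SendErrorMessage { msg: ErrorMessage },
    }

    pub enum MessageSendEvent {
        HandleError { node_id: PublicKey, action: ErrorAction },
    }

    pub struct Handler {
        pending_msg_events: Mutex<Vec<MessageSendEvent>>,
    }

    impl Handler {
        pub fn new() -> Handler {
            Handler { pending_msg_events: Mutex::new(Vec::new()) }
        }

        // Previously `-> Result<(), LightningError>`: the failure is now queued
        // as a HandleError event instead of being returned to the caller.
        pub fn handle_shutdown(&self, their_node_id: &PublicKey, script_matches: bool) {
            if !script_matches {
                self.pending_msg_events.lock().unwrap().push(MessageSendEvent::HandleError {
                    node_id: *their_node_id,
                    action: ErrorAction::SendErrorMessage {
                        msg: ErrorMessage { channel_id: [0; 32], data: "scriptpubkey mismatch".to_string() },
                    },
                });
            }
        }

        // Mirrors MessageSendEventsProvider::get_and_clear_pending_msg_events().
        pub fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
            std::mem::replace(&mut *self.pending_msg_events.lock().unwrap(), Vec::new())
        }
    }

The consumer then decides per ErrorAction whether to send the error message to the peer, disconnect, or do nothing for IgnoreError.
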
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index c0035f4f4..669ce7782 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -686,73 +686,73 @@ impl PeerManager { // Channel control: 32 => { let msg = try_potential_decodeerror!(msgs::OpenChannel::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_open_channel(&peer.their_node_id.unwrap(), peer.their_local_features.clone().unwrap(), &msg)); + self.message_handler.chan_handler.handle_open_channel(&peer.their_node_id.unwrap(), peer.their_local_features.clone().unwrap(), &msg); }, 33 => { let msg = try_potential_decodeerror!(msgs::AcceptChannel::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_accept_channel(&peer.their_node_id.unwrap(), peer.their_local_features.clone().unwrap(), &msg)); + self.message_handler.chan_handler.handle_accept_channel(&peer.their_node_id.unwrap(), peer.their_local_features.clone().unwrap(), &msg); }, 34 => { let msg = try_potential_decodeerror!(msgs::FundingCreated::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_created(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_funding_created(&peer.their_node_id.unwrap(), &msg); }, 35 => { let msg = try_potential_decodeerror!(msgs::FundingSigned::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_signed(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_funding_signed(&peer.their_node_id.unwrap(), &msg); }, 36 => { let msg = try_potential_decodeerror!(msgs::FundingLocked::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_locked(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_funding_locked(&peer.their_node_id.unwrap(), &msg); }, 38 => { let msg = try_potential_decodeerror!(msgs::Shutdown::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_shutdown(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_shutdown(&peer.their_node_id.unwrap(), &msg); }, 39 => { let msg = try_potential_decodeerror!(msgs::ClosingSigned::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_closing_signed(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_closing_signed(&peer.their_node_id.unwrap(), &msg); }, 128 => { let msg = try_potential_decodeerror!(msgs::UpdateAddHTLC::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_update_add_htlc(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_update_add_htlc(&peer.their_node_id.unwrap(), &msg); }, 130 => { let msg = try_potential_decodeerror!(msgs::UpdateFulfillHTLC::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fulfill_htlc(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_update_fulfill_htlc(&peer.their_node_id.unwrap(), &msg); }, 131 => { let msg = try_potential_decodeerror!(msgs::UpdateFailHTLC::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fail_htlc(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_update_fail_htlc(&peer.their_node_id.unwrap(), &msg); }, 135 => { let msg = 
try_potential_decodeerror!(msgs::UpdateFailMalformedHTLC::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fail_malformed_htlc(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_update_fail_malformed_htlc(&peer.their_node_id.unwrap(), &msg); }, 132 => { let msg = try_potential_decodeerror!(msgs::CommitmentSigned::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_commitment_signed(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_commitment_signed(&peer.their_node_id.unwrap(), &msg); }, 133 => { let msg = try_potential_decodeerror!(msgs::RevokeAndACK::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_revoke_and_ack(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_revoke_and_ack(&peer.their_node_id.unwrap(), &msg); }, 134 => { let msg = try_potential_decodeerror!(msgs::UpdateFee::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fee(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_update_fee(&peer.their_node_id.unwrap(), &msg); }, 136 => { let msg = try_potential_decodeerror!(msgs::ChannelReestablish::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_channel_reestablish(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_channel_reestablish(&peer.their_node_id.unwrap(), &msg); }, // Routing control: 259 => { let msg = try_potential_decodeerror!(msgs::AnnouncementSignatures::read(&mut reader)); - try_potential_handleerror!(self.message_handler.chan_handler.handle_announcement_signatures(&peer.their_node_id.unwrap(), &msg)); + self.message_handler.chan_handler.handle_announcement_signatures(&peer.their_node_id.unwrap(), &msg); }, 256 => { let msg = try_potential_decodeerror!(msgs::ChannelAnnouncement::read(&mut reader)); diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 4381e2176..a0df36175 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -100,54 +100,22 @@ impl TestChannelMessageHandler { } impl msgs::ChannelMessageHandler for TestChannelMessageHandler { - fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::OpenChannel) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::AcceptChannel) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) -> Result<(), LightningError> { - Err(LightningError { err: "", action: 
msgs::ErrorAction::IgnoreError }) - } - fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } - fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) -> Result<(), LightningError> { - Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) - } + fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::OpenChannel) {} + fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::AcceptChannel) {} + fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) {} + fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) {} + fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) {} + fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) {} + fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) {} + fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) {} + fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) {} + fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) {} + fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) {} + fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) {} + fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) {} + fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) {} + fn handle_announcement_signatures(&self, _their_node_id: 
&PublicKey, _msg: &msgs::AnnouncementSignatures) {} + fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) {} fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {} fn peer_connected(&self, _their_node_id: &PublicKey) {} fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
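
Tests against the Result-less interface follow the drain-and-assert pattern used throughout functional_tests.rs above: call the handler, then inspect the queued events. A hedged sketch of such an assertion helper, reusing the simplified stand-in types from the previous sketch (expect_error_event is illustrative, not a helper in the codebase):

    fn expect_error_event(handler: &Handler, expected: &str) {
        // Drain the queued events and require exactly one HandleError
        // carrying a SendErrorMessage with the expected error string.
        let mut events = handler.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        match events.pop().unwrap() {
            MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg }, .. } => {
                assert_eq!(msg.data, expected);
            },
            _ => panic!("Unexpected event"),
        }
    }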