// You may not use this file except in accordance with one or both of these
// licenses.
-//! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
+//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
//! monitor updates.
//! There are a bunch of these as their handling is relatively error-prone so they are split out
//! here. See also the chanmon_fail_consistency fuzz test.
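//!
//! For orientation, a sketch of the three `ChannelMonitorUpdateStatus` values these
//! tests drive through the test persister (the summaries below are ours, not the
//! normative API docs):
//!
//!     // Completed: persistence finished synchronously; the channel keeps operating.
//!     // InProgress: persistence is asynchronous; the channel is paused until the
//!     //             test signals completion via `force_channel_monitor_updated`.
//!     // PermanentFailure: the update can never be persisted; the channel is closed.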
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
-use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
-use chain::transaction::OutPoint;
-use chain::{ChannelMonitorUpdateErr, Listen, Watch};
-use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
-use ln::channel::AnnouncementSigsState;
-use ln::features::InitFeatures;
-use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
-use util::config::UserConfig;
-use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
-use util::errors::APIError;
-use util::ser::{ReadableArgs, Writeable};
-use util::test_utils::TestBroadcaster;
-
-use ln::functional_test_utils::*;
-
-use util::test_utils;
-
-use io;
-use prelude::*;
-use sync::{Arc, Mutex};
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
+use crate::chain::transaction::OutPoint;
+use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
+use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
+use crate::ln::channelmanager::{ChannelManager, RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
+use crate::ln::channel::AnnouncementSigsState;
+use crate::ln::msgs;
+use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::errors::APIError;
+use crate::util::ser::{ReadableArgs, Writeable};
+use crate::util::test_utils::TestBroadcaster;
+
+use crate::ln::functional_test_utils::*;
+
+use crate::util::test_utils;
+
+use crate::io;
+use bitcoin::hashes::Hash;
+use bitcoin::TxMerkleNode;
+use crate::prelude::*;
+use crate::sync::{Arc, Mutex};
#[test]
fn test_simple_monitor_permanent_update_fail() {
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 0, 1);
let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
- unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable {..}, {});
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
+ unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+ RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
+ ), true, APIError::ChannelUnavailable {..}, {});
check_added_monitors!(nodes[0], 2);
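// (The two expected monitor updates here are, on our reading: the payment's
// commitment update that the persister rejected, plus the force-close update
// generated when PermanentFailure closes the channel.)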
let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
_ => panic!("Unexpected event"),
};
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
// TODO: Once we hit the chain with the failure transaction we should check that we get a
// PaymentPathFailed event
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Create some initial channel
- let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };
// Rebalance the network to generate htlc in the two directions
blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
};
let chain_mon = {
- let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
- let mut w = test_utils::TestVecWriter(Vec::new());
- monitor.write(&mut w).unwrap();
- let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
- assert!(new_monitor == *monitor);
+ let new_monitor = {
+ let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+ let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+ &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+ assert!(new_monitor == *monitor);
+ new_monitor
+ };
let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
- assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
+ assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
chain_mon
};
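// The block above is the usual monitor persistence round-trip, schematically:
//
//     let bytes = monitor.encode();
//     let (_, copy) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
//         &mut io::Cursor::new(&bytes), (keys_manager, keys_manager)).unwrap();
//     assert!(copy == *monitor);
//
// i.e. a monitor must deserialize (given the node's keys) into an identical copy
// before being handed to a fresh chain monitor via watch_channel.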
- let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader {
+ version: 0x20000000,
+ prev_blockhash: BlockHash::all_zeros(),
+ merkle_root: TxMerkleNode::all_zeros(),
+ time: 42,
+ bits: 42,
+ nonce: 42
+ };
chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
- // Set the persister's return value to be a TemporaryFailure.
- persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ // Set the persister's return value to be InProgress.
+ persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
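// InProgress means the update was applied in memory but its persistence has not
// completed: the ChannelManager withholds further commitment/RAA messages on the
// channel until completion is signalled (see force_channel_monitor_updated below).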
// Try to update ChannelMonitor
nodes[1].node.claim_funds(preimage);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
- if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) {
- if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
- // Check that even though the persister is returning a TemporaryFailure,
+ {
+ let mut node_0_per_peer_lock;
+ let mut node_0_peer_state_lock;
+ let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
+ if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+ // Check that even though the persister is returning InProgress,
// because the update is bogus, ultimately the error that's returned
// should be a PermanentFailure.
- if let Err(ChannelMonitorUpdateErr::PermanentFailure) = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
- logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Failed to persist ChannelMonitor update for channel [0-9a-f]*: TemporaryFailure").unwrap(), 1);
- if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+ if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
+ logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
- } else { assert!(false); };
+ }
check_added_monitors!(nodes[0], 1);
let events = nodes[0].node.get_and_clear_pending_events();
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
{
- unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateFailed, {});
+ unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+ RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
+ ), false, APIError::MonitorUpdateInProgress, {});
check_added_monitors!(nodes[0], 1);
}
assert_eq!(nodes[0].node.list_channels().len(), 1);
if disconnect {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
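// The three lines above are the "async persistence completed" replay pattern used
// throughout this file, schematically:
//
//     persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // future updates succeed
//     let (outpoint, update_id, _) = /* latest recorded update for the channel */;
//     chain_monitor.force_channel_monitor_updated(outpoint, update_id);
//
// which surfaces a completion event and lets the ChannelManager release the
// messages it was holding for the paused channel.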
let events_3 = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events_3.len(), 1);
match events_3[0] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
assert_eq!(payment_hash_1, *payment_hash);
assert_eq!(amount_msat, 1_000_000);
+ assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
// Now set it to failed again...
let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
{
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
- unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
+ ), false, APIError::MonitorUpdateInProgress, {});
check_added_monitors!(nodes[0], 1);
}
assert_eq!(nodes[0].node.list_channels().len(), 1);
if disconnect {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
// ...and make sure we can force-close a frozen channel
- nodes[0].node.force_close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);
// * First we route a payment, then get a temporary monitor update failure when trying to
// route a second payment. We then claim the first payment.
// * If disconnect_count is set, we will disconnect at this point (which is likely as
- // TemporaryFailure likely indicates net disconnect which resulted in failing to update
- // the ChannelMonitor on a watchtower).
+ //   InProgress likely indicates a network disconnect, which resulted in failing to update
+ //   the ChannelMonitor on a watchtower).
// * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
//   immediately; otherwise we wait for the disconnect and deliver them via the reconnect
// channel_reestablish processing (ie disconnect_count & 16 makes no sense if
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
// Now try to send a second payment which will fail to send
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
- unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
+ ), false, APIError::MonitorUpdateInProgress, {});
check_added_monitors!(nodes[0], 1);
}
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
check_added_monitors!(nodes[0], 1);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
}
(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
};
if disconnect_count & !disconnect_flags > 0 {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
}
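// (The bit-test above matches the scheme described at the top of the test: the low
// bits pick the disconnect point, while e.g. disconnect_count & 16 switches to
// delivery via channel_reestablish; this reading is inferred from the checks here.)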
// Now fix monitor updating...
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
macro_rules! disconnect_reconnect_peers { () => { {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
let events_5 = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events_5.len(), 1);
match events_5[0] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
assert_eq!(payment_hash_2, *payment_hash);
assert_eq!(amount_msat, 1_000_000);
+ assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, our_payment_hash,
+ RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
assert!(updates.update_fee.is_none());
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
},
_ => panic!("Unexpected event"),
}
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::PaymentReceived { payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
assert_eq!(payment_hash, our_payment_hash);
assert_eq!(amount_msat, 1_000_000);
+ assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- nodes[0].node.send_payment(&route, our_payment_hash, &Some(payment_secret_1)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
check_added_monitors!(nodes[1], 1);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::PaymentReceived { payment_hash, .. } => {
+ Event::PaymentClaimable { payment_hash, .. } => {
assert_eq!(payment_hash, our_payment_hash);
},
_ => panic!("Unexpected event"),
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
send_payment(&nodes[0], &[&nodes[1]], 5000000);
let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- nodes[0].node.send_payment(&route, our_payment_hash_1, &Some(our_payment_secret_1)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, our_payment_hash_1,
+ RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000);
{
- nodes[1].node.send_payment(&route, our_payment_hash_2, &Some(our_payment_secret_2)).unwrap();
+ nodes[1].node.send_payment_with_route(&route, our_payment_hash_2,
+ RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[1], 1);
}
let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));
check_added_monitors!(nodes[1], 1);
let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
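// Two InProgress returns are queued because the test persister hands out one
// queued status per monitor update (our understanding of TestPersister), and the
// commitment_signed below plus the subsequent RAA each trigger one update.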
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
check_added_monitors!(nodes[0], 1);
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
check_added_monitors!(nodes[0], 1);
expect_pending_htlcs_forwardable!(nodes[0]);
- expect_payment_received!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);
+ expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
check_added_monitors!(nodes[1], 1);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);
+ expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
// Rebalance a bit so that we can send backwards from nodes[2] to nodes[1].
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
nodes[2].node.fail_htlc_backwards(&payment_hash_1);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
check_added_monitors!(nodes[2], 1);
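// HTLCDestination records where the failed HTLC was headed; the two variants used
// in this file are, schematically:
//
//     HTLCDestination::FailedPayment { payment_hash }          // we were the final recipient
//     HTLCDestination::NextHopChannel { node_id, channel_id }  // we were forwarding it onward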
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
// holding cell.
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
{
- nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Now fail monitor updating.
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
// being paused while waiting on a monitor update.
let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
{
- nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_3,
+ RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
- chanmon_cfgs[1].persister.set_update_ret(Ok(())); // We succeed in updating the monitor for the first channel
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
// Try to route another payment backwards from 2 to make sure 1 holds off on responding
let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
- nodes[2].node.send_payment(&route, payment_hash_4, &Some(payment_secret_4)).unwrap();
+ nodes[2].node.send_payment_with_route(&route, payment_hash_4,
+ RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
check_added_monitors!(nodes[2], 1);
send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
(Some(payment_preimage_4), Some(payment_hash_4))
} else { (None, None) };
// Restore monitor updating, ensuring we immediately get a fail-back update and an
// update_add update.
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
// Note that the ordering of the events for different nodes is non-prescriptive, though the
// ordering of the two events that both go to nodes[2] have to stay in the same order.
- let messages_a = match events_3.pop().unwrap() {
+ let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3);
+ let messages_a = match nodes_0_event {
MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
assert_eq!(node_id, nodes[0].node.get_our_node_id());
assert!(updates.update_fulfill_htlcs.is_empty());
},
_ => panic!("Unexpected event type!"),
};
+
+ let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
+ let send_event_b = SendEvent::from_event(nodes_2_event);
+ assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
+
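// remove_first_msg_event_to_node pops the first pending message event addressed
// to the given peer, roughly (sketch; the predicate name is hypothetical):
//
//     let pos = events.iter().position(|e| event_dest(e) == *node_id).unwrap();
//     events.remove(pos)
//
// letting the test pull nodes[0]'s and nodes[2]'s messages out of one mixed queue
// while preserving each peer's ordering.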
let raa = if test_ignore_second_cs {
- match events_3.remove(1) {
+ let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
+ match nodes_2_event {
MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
assert_eq!(node_id, nodes[2].node.get_our_node_id());
Some(msg.clone())
_ => panic!("Unexpected event"),
}
} else { None };
- let send_event_b = SendEvent::from_event(events_3.remove(0));
- assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
// Now deliver the new messages...
check_added_monitors!(nodes[2], 1);
let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
+ // As both messages are for nodes[1], they're in order.
assert_eq!(bs_revoke_and_commit.len(), 2);
match bs_revoke_and_commit[0] {
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
let events_6 = nodes[2].node.get_and_clear_pending_events();
assert_eq!(events_6.len(), 2);
match events_6[0] {
- Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
+ Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
_ => panic!("Unexpected event"),
};
match events_6[1] {
- Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
+ Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
_ => panic!("Unexpected event"),
};
let events_9 = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events_9.len(), 1);
match events_9[0] {
- Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
+ Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
_ => panic!("Unexpected event"),
};
claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 1, 2);
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[2].node.claim_funds(payment_preimage);
check_added_monitors!(nodes[2], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
- let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+ let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
+ let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
+ nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
check_added_monitors!(nodes[1], 1);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
- assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
- assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
+ assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
+ assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
assert_eq!(
get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
// requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
// generation during RAA while in monitor-update-failed state.
{
- nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+ RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
check_added_monitors!(nodes[0], 1);
- nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[0], 0);
}
// Now we have a CS queued up which adds a new HTLC (which will need an RAA/CS response from
// nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
// then restore channel monitor updates.
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
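// As above, two queued InProgress returns: one is consumed by the
// commitment_signed handling below, one by the RAA delivered after it.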
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
check_added_monitors!(nodes[1], 1);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
// nodes[1] should be AwaitingRAA here!
check_added_monitors!(nodes[1], 0);
let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
+ expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
// We send a third payment here, which is somewhat of a redundant test, but the
// chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
// commitment transaction states) whereas here we can explicitly check for it.
{
- nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_3,
+ RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
check_added_monitors!(nodes[0], 0);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
+ expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
+ expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
// Forward a payment for B to claim
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[1].node.claim_funds(payment_preimage_1);
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
- let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+ let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
+ let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
// update.
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// the monitor still failed
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
// Note that nodes[1] not updating the monitor here is OK - it won't take action on the new HTLC
// until we've channel_monitor_update'd and updated for the new commitment transaction.
// Now un-fail the monitor, which will result in B sending its original commitment update,
// receiving the commitment update from A, and the resulting commitment dances.
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
check_added_monitors!(nodes[1], 1);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
+ expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
check_added_monitors!(nodes[0], 1);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ {
+ let mut node_0_per_peer_lock;
+ let mut node_0_peer_state_lock;
+ get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ }
{
- let mut lock;
- get_channel_ref!(nodes[0], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
- get_channel_ref!(nodes[1], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ let mut node_1_per_peer_lock;
+ let mut node_1_peer_state_lock;
+ get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
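// get_channel_ref! needs both guards because channels now live in per-peer state;
// roughly what the macro binds (field names are our assumption):
//
//     let per_peer = node.node.per_peer_state.read().unwrap();
//     let mut peer = per_peer.get(&counterparty_id).unwrap().lock().unwrap();
//     peer.channel_by_id.get_mut(&channel_id).unwrap()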
// Route the payment and deliver the initial commitment_signed (with a monitor update failure
// on receipt).
let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+ RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let payment_event = SendEvent::from_event(events.pop().unwrap());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
// is still failing to update monitors.
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
- let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+ let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
+ let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
check_added_monitors!(nodes[1], 1);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
+ expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
// Route the first payment outbound, holding the last RAA for B until we are set up so that we
// can deliver it and fail the monitor update.
let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+ RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
// Route the second payment, generating an update_add_htlc/commitment_signed
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
let payment_event = SendEvent::from_event(events.pop().unwrap());
assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// Deliver the final RAA for the first payment, which does not require a response. RAAs
// generally require a commitment_signed, so the fact that we're expecting an opposite response
// to the next message also tests resetting the delivery order.
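+ // (Sketch of what the assertions below rely on, assuming the TestPersister is still returning
+ // ChannelMonitorUpdateStatus::InProgress: the RAA is absorbed without generating any messages,
+ // and the later commitment_signed's RAA/CS response is withheld until the monitor update
+ // completes.)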
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
// Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an RAA/CS response from nodes[1].
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
+ expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
check_added_monitors!(nodes[1], 1);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
+ expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 1, 2);
// Rebalance a bit so that we can send backwards from 3 to 2.
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
- expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
// Note that at this point there is a pending commitment transaction update for A being held by B.
let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000);
{
- nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+ nodes[2].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[2], 1);
}
// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
// paused, so forward shouldn't succeed until we call channel_monitor_updated().
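+ // (Unpausing, sketched: set the persister back to Completed for future updates, then call
+ // force_channel_monitor_updated with the latest update id so the ChainMonitor reports the
+ // in-flight update as complete; only then are held messages and claims released.)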
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let mut events = nodes[2].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+ expect_pending_htlcs_forwardable_ignore!(nodes[1]);
let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
- nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
+ nodes[2].node.send_payment_with_route(&route, payment_hash_3,
+ RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
check_added_monitors!(nodes[2], 1);
let mut events = nodes[2].node.get_and_clear_pending_msg_events();
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
- let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+ let channel_id = chan_1.2;
+ let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+ expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
check_added_monitors!(nodes[1], 0);
let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
expect_payment_sent!(nodes[0], payment_preimage_1);
// Get the payment forwards, note that they were batched into one commitment update.
- expect_pending_htlcs_forwardable!(nodes[1]);
+ nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id, .. } => {
assert_eq!(payment_hash_2, *payment_hash);
assert_eq!(1_000_000, amount_msat);
+ assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
+ assert_eq!(via_user_channel_id, Some(42));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
_ => panic!("Unexpected event"),
}
match events[1] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
assert_eq!(payment_hash_3, *payment_hash);
assert_eq!(1_000_000, amount_msat);
+ assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
// Rebalance a bit so that we can send backwards from 3 to 1.
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
nodes[2].node.fail_htlc_backwards(&payment_hash_1);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
check_added_monitors!(nodes[2], 1);
let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
{
- nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+ nodes[2].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[2], 1);
}
nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
- expect_pending_htlcs_forwardable!(nodes[1]);
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 2);
- if let Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+ assert_eq!(events.len(), 3);
+ if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
assert_eq!(payment_hash, payment_hash_1);
- assert!(rejected_by_dest);
+ assert!(payment_failed_permanently);
} else { panic!("Unexpected event!"); }
- match events[1] {
+ match events[2] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, payment_hash_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[0] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
nodes[0].node.process_pending_htlc_forwards();
- expect_payment_received!(nodes[0], payment_hash_2, payment_secret_2, 1000000);
+ expect_payment_claimable!(nodes[0], payment_hash_2, payment_secret_2, 1000000);
claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
}
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
// Forward a payment for B to claim
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
// Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[0], 1);
}
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
- expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 0);
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+ expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
check_added_monitors!(nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
+ expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
- nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
- nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
check_added_monitors!(nodes[0], 0);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 0);
}
// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ // But we want to re-emit ChannelPending
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
node.gossip_sync.handle_channel_update(&bs_update).unwrap();
}
+ if !restore_b_before_lock {
+ expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
+ } else {
+ expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
+ }
+
send_payment(&nodes[0], &[&nodes[1]], 8000000);
close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
- let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
- let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+ let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
// Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
let path = route.paths[0].clone();
route.paths.push(path);
- route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
- route.paths[0][0].short_channel_id = chan_1_id;
- route.paths[0][1].short_channel_id = chan_3_id;
- route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
- route.paths[1][0].short_channel_id = chan_2_ann.contents.short_channel_id;
- route.paths[1][1].short_channel_id = chan_4_id;
+ route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
+ route.paths[0].hops[0].short_channel_id = chan_1_id;
+ route.paths[0].hops[1].short_channel_id = chan_3_id;
+ route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
+ route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id;
+ route.paths[1].hops[1].short_channel_id = chan_4_id;
// Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
// (for the path 0 -> 2 -> 3) fails.
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
- chanmon_cfgs[0].persister.set_next_update_ret(Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
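+ // (Assumption about TestPersister semantics: set_update_ret queues statuses in FIFO order, so
+ // the Completed above applies to the first path's monitor update and the InProgress to the
+ // second's.)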
// Now check that we get the right return value, indicating that the first path succeeded but
- // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
- // some paths succeeded, preventing retry.
- if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) {
+ // the second got a MonitorUpdateInProgress err. This implies
+ // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
+ if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route(
+ &route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
+ ) {
assert_eq!(results.len(), 2);
if let Ok(()) = results[0] {} else { panic!(); }
- if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
+ if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
} else { panic!(); }
check_added_monitors!(nodes[0], 2);
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
// Pass the first HTLC of the payment along to nodes[3].
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 0, 1);
send_payment(&nodes[0], &[&nodes[1]], 100_000_00);
let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000);
- nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ nodes[1].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[1], 1);
let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// bs_initial_send_msgs are not delivered until they are re-generated after reconnect
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
// bs_first_raa is not delivered until it is re-generated after reconnect
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
- let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
- let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
check_added_monitors!(nodes[1], 1);
expect_pending_htlcs_forwardable!(nodes[0]);
- expect_payment_received!(nodes[0], payment_hash, payment_secret, 1_000_000);
+ expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000);
claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
}
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
check_closed_broadcast!(nodes[1], true);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
check_added_monitors!(nodes[1], 1);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 0, 1);
send_payment(&nodes[0], &[&nodes[1]], 1000);
{
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
- let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
- let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let persister: test_utils::TestPersister;
let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000, InitFeatures::known(), InitFeatures::known()).2;
+ let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2;
let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
- // Do a really complicated dance to get an HTLC into the holding cell, with MonitorUpdateFailed
- // set but AwaitingRemoteRevoke unset. When this test was written, any attempts to send an HTLC
- // while MonitorUpdateFailed is set are immediately failed-backwards. Thus, the only way to get
- // an AddHTLC into the holding cell is to add it while AwaitingRemoteRevoke is set but
- // MonitorUpdateFailed is unset, and then swap the flags.
+ // Do a really complicated dance to get an HTLC into the holding cell, with
+ // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any
+ // attempts to send an HTLC while MonitorUpdateInProgress is set are immediately
+ // failed-backwards. Thus, the only way to get an AddHTLC into the holding cell is to add it
+ // while AwaitingRemoteRevoke is set but MonitorUpdateInProgress is unset, and then swap the
+ // flags.
//
// We do this by:
// a) routing a payment from node B to node A,
// e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
// clearing AwaitingRemoteRevoke on node A.
//
- // Note that because, at the end, MonitorUpdateFailed is still set, the HTLC generated in (c)
- // will not be freed from the holding cell.
+ // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
+ // (c) will not be freed from the holding cell.
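+ //
+ // Sketched as a state table (not authoritative, just the interplay exercised here):
+ //   AwaitingRemoteRevoke set, MonitorUpdateInProgress unset -> new HTLC enters the holding cell
+ //   MonitorUpdateInProgress then set via a paused claim     -> holding cell stays frozen
+ //   monitor update completes                                -> holding cell is freed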
let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
- nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+ RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let send = SendEvent::from_node(&nodes[0]);
assert_eq!(send.msgs.len(), 1);
- nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[0], 0);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
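+ // (Assuming FIFO TestPersister semantics, two statuses are queued: one for the monitor update
+ // from claim_funds just below and one for the next update generated while the channel is
+ // paused.)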
nodes[0].node.claim_funds(payment_preimage_0);
check_added_monitors!(nodes[0], 1);
- expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
// disconnect the peers. Note that the fuzzer originally found this issue because
// deserializing a ChannelManager in this state causes an assertion failure.
if reload_a {
- let nodes_0_serialized = nodes[0].node.encode();
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let config = UserConfig::default();
- nodes_0_deserialized = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: config,
- keys_manager,
- fee_estimator: node_cfgs[0].fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: nodes[0].logger,
- channel_monitors,
- }).unwrap().1
- };
- nodes[0].node = &nodes_0_deserialized;
- assert!(nodes_0_read.is_empty());
-
- nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
- check_added_monitors!(nodes[0], 1);
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
} else {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
}
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// Now reconnect the two
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
// If we finish updating the monitor, we should free the holding cell right away (this did
// not occur prior to #756).
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
+ expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
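+ // (Completing the update surfaces the PaymentClaimed event above and, per the comment above,
+ // frees the holding cell; the messages fetched below were queued while the update was pending.)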
// New outbound messages should be generated immediately upon a call to
// get_and_clear_pending_msg_events (but not before).
let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 100000);
+ expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
check_added_monitors!(nodes[1], 1);
commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
};
nodes[1].node.process_pending_htlc_forwards();
- expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000);
+ expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 100000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
// In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
// awaiting a remote revoke_and_ack from nodes[0].
let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
- nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, second_payment_hash,
+ RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
};
if second_fails {
nodes[2].node.fail_htlc_backwards(&payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[2], 1);
get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
} else {
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
}
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
if second_fails {
reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
} else {
reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
// Test that temporary failures when updating the monitor's shutdown script delay cooperative
// close.
let mut config = test_default_channel_config();
- config.own_channel_config.commit_upfront_shutdown_pubkey = false;
+ config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
check_added_monitors!(nodes[1], 1);
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
check_added_monitors!(nodes[0], 1);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
// Test that permanent failures when updating the monitor's shutdown script result in a force
// close when initiating a cooperative close.
let mut config = test_default_channel_config();
- config.own_channel_config.commit_upfront_shutdown_pubkey = false;
+ config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
- check_closed_broadcast!(nodes[0], true);
+
+ // We always send the `shutdown` response when initiating a shutdown, even if we immediately
+ // close the channel thereafter.
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 3);
+ if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+ if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+ if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
+
check_added_monitors!(nodes[0], 2);
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}
// Test that permanent failures when updating the monitor's shutdown script result in a force
// close when handling a cooperative close.
let mut config = test_default_channel_config();
- config.own_channel_config.commit_upfront_shutdown_pubkey = false;
+ config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &shutdown);
- check_closed_broadcast!(nodes[1], true);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
+
+ // We always send the `shutdown` response when receiving a shutdown, even if we immediately
+ // close the channel thereafter.
+ let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 3);
+ if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+ if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+ if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
+
check_added_monitors!(nodes[1], 2);
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// `claim_funds` results in a ChannelMonitorUpdate.
nodes[1].node.claim_funds(payment_preimage_1);
check_added_monitors!(nodes[1], 1);
- expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
// which had some asserts that prevented it from being called twice.
nodes[1].node.claim_funds(payment_preimage_2);
check_added_monitors!(nodes[1], 1);
- expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
check_added_monitors!(nodes[1], 0);
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
- // Complete the first HTLC.
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
+ // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
+ // and get both PaymentClaimed events at once.
+ let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
+ _ => panic!("Unexpected Event: {:?}", events[0]),
+ }
+ match events[1] {
+ Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
+ _ => panic!("Unexpected Event: {:?}", events[1]),
+ }
+
+ assert_eq!(msg_events.len(), 1);
let (update_fulfill_1, commitment_signed_b1, node_id) = {
- match &events[0] {
+ match &msg_events[0] {
&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
assert!(update_add_htlcs.is_empty());
assert_eq!(update_fulfill_htlcs.len(), 1);
commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
expect_payment_sent!(nodes[0], payment_preimage_2);
}
+
+fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
+ // Test that if the monitor update generated in funding_signed is stored async and we restart
+ // with the latest ChannelManager, but the ChannelMonitor persistence never completed, we
+ // happily drop the channel and move on.
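+ // Broad shape of the scenario (a sketch, not new behavior): open and fund the channel, leave
+ // the funding_signed-triggered monitor update InProgress, serialize only the ChannelManager,
+ // reload with no monitors, and expect the half-set-up channel to be discarded.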
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+
+ let mut chan_config = test_default_channel_config();
+ chan_config.manually_accept_inbound_channels = true;
+ chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
+
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ if use_0conf {
+ nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+ } else {
+ nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+ }
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+
+ let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
+ check_added_monitors!(nodes[0], 0);
+
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ check_added_monitors!(nodes[1], 1);
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+ let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
+ match &bs_signed_locked[0] {
+ MessageSendEvent::SendFundingSigned { msg, .. } => {
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
+ check_added_monitors!(nodes[0], 1);
+ }
+ _ => panic!("Unexpected event"),
+ }
+ if use_0conf {
+ match &bs_signed_locked[1] {
+ MessageSendEvent::SendChannelReady { msg, .. } => {
+ nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
+ }
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+ // nodes[0] is now waiting on the first ChannelMonitor persistence to complete in order to
+ // broadcast the funding transaction. If nodes[0] restarts at this point with the
+ // ChannelMonitor lost, we should simply discard the channel.
+
+ // The test framework checks that watched_txn/outputs match the monitor set, which they will
+ // not, so we have to clear them here.
+ nodes[0].chain_source.watched_txn.lock().unwrap().clear();
+ nodes[0].chain_source.watched_outputs.lock().unwrap().clear();
+
+ reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
+ check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
+ assert!(nodes[0].node.list_channels().is_empty());
+}
+
+#[test]
+fn test_outbound_reload_without_init_mon() {
+ do_test_outbound_reload_without_init_mon(true);
+ do_test_outbound_reload_without_init_mon(false);
+}
+
+fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: bool) {
+ // Test that if the initial monitor update generated when handling funding_created is stored
+ // async and we restart with the latest ChannelManager, but the ChannelMonitor persistence
+ // never completed, we happily drop the channel and move on.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+
+ let mut chan_config = test_default_channel_config();
+ chan_config.manually_accept_inbound_channels = true;
+ chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
+
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ if use_0conf {
+ nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+ } else {
+ nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+ }
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+
+ let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
+ check_added_monitors!(nodes[0], 0);
+
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ check_added_monitors!(nodes[1], 1);
+
+ // nodes[1] happily sends its funding_signed even though it's awaiting the persistence of the
+ // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding
+ // transaction is confirmed.
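+ // (This is why the msg_events check further below stays empty even when the funding
+ // transaction confirms or nodes[0]'s channel_ready is delivered: the SendChannelReady is gated
+ // on force_channel_monitor_updated, which this test deliberately never calls.)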
+ let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+ check_added_monitors!(nodes[0], 1);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
+
+ let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ if lock_commitment {
+ confirm_transaction(&nodes[0], &as_funding_tx[0]);
+ confirm_transaction(&nodes[1], &as_funding_tx[0]);
+ }
+ if use_0conf || lock_commitment {
+ let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_ready);
+ }
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // nodes[1] is now waiting on the first ChannelMonitor persistence to complete in order to
+ // move the channel to ready (or is waiting on the funding transaction to confirm). If nodes[1]
+ // restarts at this point with the ChannelMonitor lost, we should simply discard the channel.
+
+ // The test framework checks that watched_txn/outputs match the monitor set, which they will
+ // not, so we have to clear them here.
+ nodes[1].chain_source.watched_txn.lock().unwrap().clear();
+ nodes[1].chain_source.watched_outputs.lock().unwrap().clear();
+
+ reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);
+
+ check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+ assert!(nodes[1].node.list_channels().is_empty());
+}
+
+#[test]
+fn test_inbound_reload_without_init_mon() {
+ do_test_inbound_reload_without_init_mon(true, true);
+ do_test_inbound_reload_without_init_mon(true, false);
+ do_test_inbound_reload_without_init_mon(false, true);
+ do_test_inbound_reload_without_init_mon(false, false);
+}