use crate::chain::{ChannelMonitorUpdateStatus, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor::ChannelMonitor;
-use crate::chain::keysinterface::EntropySource;
+use crate::sign::EntropySource;
use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
-use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId};
+use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::enforcing_trait_impls::EnforcingSigner;
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert!(events_2.is_empty());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
// nodes[0] hasn't yet received a channel_ready, so on reconnect it only re-sends its own channel_ready.
let chan_0_monitor_serialized = get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
}
// Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
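+ // Both nodes should have generated a `ChannelPending` event for the new channel once
+ // the funding transaction became broadcastable; consume those events here.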
+ expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id());
+ expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id());
+
nodes.push(node_a);
nodes.push(node_b);
// Make sure the channel is functioning as though the de/serialization never happened
assert_eq!(nodes[0].node.list_channels().len(), 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- { // Channel close should result in a commitment tx
- let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(txn.len(), 1);
- check_spends!(txn[0], funding_tx);
- assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
- }
-
for monitor in node_0_monitors.drain(..) {
assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[0], 1);
}
nodes[0].node = &nodes_0_deserialized;
+
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+ { // Channel close should result in a commitment tx
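+ // Broadcasting that commitment transaction is deferred to background-event
+ // processing, which a timer tick triggers.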
+ nodes[0].node.timer_tick_occurred();
+ let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+ }
+ check_added_monitors!(nodes[0], 1);
// nodes[1] and nodes[2] have no lost state with nodes[0]...
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
//... and we can even still claim the payment!
claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
- nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
- nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init {
+ features: nodes[3].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
let mut found_err = false;
for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
if reconnect_panicing {
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
// after the warning message sent by B, we should not be able to
// use the channel, or successfully reconnect to it.
assert!(nodes[0].node.list_usable_channels().is_empty());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
// First send a payment to nodes[1]
let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
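+ // `RecipientOnionFields::secret_only` builds the recipient's onion data from just the
+ // payment secret, with no custom TLVs or payment metadata.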
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
// Next send a payment which is forwarded by nodes[1]
let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000);
- nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+ nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(route.paths.len(), 2);
route.paths.sort_by(|path_a, _| {
// Sort the path so that the path through nodes[1] comes first
- if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+ if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
});
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 2);
// Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
if persist_both_monitors {
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
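+ // On reload, nodes[3] replays the payment preimage to its channel monitors,
+ // generating one monitor update per still-live channel.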
+ check_added_monitors(&nodes[3], 2);
+ } else {
+ check_added_monitors(&nodes[3], 1);
}
// On restart, we should also get a duplicate PaymentClaimed event, as we persisted the ChannelManager before the claim completed.
if !persist_both_monitors {
// If one of the two channels is still live, reveal the payment preimage over it.
- nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
+ features: nodes[2].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
- nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init {
+ features: nodes[3].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
let (mut route, payment_hash, payment_preimage, payment_secret) =
get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
if use_intercept {
- route.paths[0][1].short_channel_id = intercept_scid;
+ route.paths[0].hops[1].short_channel_id = intercept_scid;
}
let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap();
check_added_monitors!(nodes[0], 1);
let payment_event = SendEvent::from_node(&nodes[0]);
});
}
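+ // The force-close commitment transaction is only handed to the broadcaster on the
+ // next timer tick.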
+ nodes[1].node.timer_tick_occurred();
let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(bs_commitment_tx.len(), 1);
+ check_added_monitors!(nodes[1], 1);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
if claim_htlc {
confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
} else {
- connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+ connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(bs_htlc_timeout_tx.len(), 1);
confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
_ => panic!("Unexpected event"),
}
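+ // Explicitly process background events so the monitor update generated by the
+ // force-close on reload is applied.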
+ nodes[1].node.test_process_background_events();
+ check_added_monitors(&nodes[1], 1);
+
// Now that the ChannelManager has force-closed the channel which had the HTLC, that HTLC is
// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.