// PaymentPathFailed event
assert_eq!(nodes[0].node.list_channels().len(), 0);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+ [nodes[1].node.get_our_node_id()], 100000);
}
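
Throughout this diff, check_closed_event! gains two arguments after the closure reason: an array of the counterparty node ids expected on the resulting closure event(s), and the channel capacity in satoshis (100000, matching the channels these tests open). A labelled sketch of the updated call shape follows; the per-argument comments are editorial, not part of the macro.

check_closed_event!(nodes[0],                  // node whose pending events are checked
	1,                                     // expected number of ChannelClosed events
	ClosureReason::HolderForceClosed,      // expected closure reason
	[nodes[1].node.get_our_node_id()],     // expected counterparty node id(s)
	100000);                               // expected channel capacity, in sats
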
#[test]
if disconnect {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.send_channel_ready = (true, true);
+ reconnect_nodes(reconnect_args);
}
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
if disconnect {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
}
// ...and make sure we can force-close a frozen channel
// PaymentPathFailed event
assert_eq!(nodes[0].node.list_channels().len(), 0);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
}
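
The old positional reconnect_nodes signature, with seven tuples passed in a fixed order, is replaced by a ReconnectArgs struct: build it with ReconnectArgs::new(&node_a, &node_b), override only the fields a given test cares about, and pass the struct to reconnect_nodes by value. The tuple positions carry over unchanged from the positional form, so an old pending_htlc_claims of (1, 0) becomes an override of that field's .0 element. A minimal sketch of the pattern, using illustrative values rather than values from any one test:

let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
reconnect_args.send_channel_ready = (true, true); // both sides re-send channel_ready
reconnect_args.pending_htlc_fails.0 = 1;          // replay one pending HTLC failure
reconnect_nodes(reconnect_args);

When no overrides are needed, the whole call collapses to reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])), as in the second hunk above.
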
#[test]
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
{
let mut node_1_per_peer_lock;
let mut node_1_peer_state_lock;
- get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
// Route the payment and deliver the initial commitment_signed (with a monitor update failure
// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.send_channel_ready.1 = confirm_a_first;
+ reconnect_nodes(reconnect_args);
// But we want to re-emit ChannelPending
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
send_payment(&nodes[0], &[&nodes[1]], 8000000);
close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
mine_transaction(&nodes[1], &bs_txn[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[1], true);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
check_added_monitors!(nodes[1], 1);
RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
check_added_monitors!(nodes[0], 0);
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.claim_funds(payment_preimage_0);
// disconnect the peers. Note that the fuzzer originally found this issue because
// deserializing a ChannelManager in this state causes an assertion failure.
if reload_a {
- let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+ persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
} else {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
}
assert_eq!(pending_cs.commitment_signed, cs);
} else { panic!(); }
- // There should be no monitor updates as we are still pending awaiting a failed one.
- check_added_monitors!(nodes[0], 0);
- check_added_monitors!(nodes[1], 0);
+ if reload_a {
+ // The two pending monitor updates were replayed (but are still pending).
+ check_added_monitors(&nodes[0], 2);
+ } else {
+ // There should be no monitor updates as we are still pending awaiting a failed one.
+ check_added_monitors(&nodes[0], 0);
+ }
+ check_added_monitors(&nodes[1], 0);
}
// If we finish updating the monitor, we should free the holding cell right away (this did
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
if second_fails {
- reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+ reconnect_args.pending_htlc_fails.0 = 1;
+ reconnect_nodes(reconnect_args);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
} else {
- reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+ reconnect_args.pending_htlc_claims.0 = 1;
+ reconnect_nodes(reconnect_args);
}
if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
assert_eq!(txn_a, txn_b);
assert_eq!(txn_a.len(), 1);
check_spends!(txn_a[0], funding_tx);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
check_added_monitors!(nodes[0], 2);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+ [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
check_added_monitors!(nodes[1], 2);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+ [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
nodes[0].chain_source.watched_outputs.lock().unwrap().clear();
reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
- check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 100000);
assert!(nodes[0].node.list_channels().is_empty());
}
reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);
- check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 100000);
assert!(nodes[1].node.list_channels().is_empty());
}