nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
+ [nodes[0].node.get_our_node_id()], channel_value);
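+ // The two new trailing arguments assert the counterparty node id(s) and the channel
+ // capacity (in sats) carried by the `ChannelClosed` event being checked.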
}
#[test]
send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
// Close down the channels...
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
mine_transaction(&nodes[0], &remote_txn[0]);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
+ [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
+ [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
check_closed_broadcast!(nodes[0], true);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
// One pending HTLC is discarded by the force-close:
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
check_closed_broadcast!(nodes[2], true);
assert_eq!(nodes[1].node.list_channels().len(), 0);
assert_eq!(nodes[2].node.list_channels().len(), 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
macro_rules! claim_funds {
($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
check_closed_broadcast!(nodes[3], true);
assert_eq!(nodes[2].node.list_channels().len(), 0);
assert_eq!(nodes[3].node.list_channels().len(), 1);
- check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
- check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
// confusing us in the following tests.
assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
ChannelMonitorUpdateStatus::Completed);
- check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
- check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[4].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed, [nodes[3].node.get_our_node_id()], 100000);
}
#[test]
node_txn.swap_remove(0);
}
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
// Verify broadcast of revoked HTLC-timeout
let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
// Broadcast revoked HTLC-timeout on node 1
mine_transaction(&nodes[1], &node_txn[1]);
test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
check_added_monitors!(nodes[1], 1);
mine_transaction(&nodes[0], &node_txn[1]);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
}
get_announce_close_broadcast_events(&nodes, 0, 1);
// Inform nodes[1] that nodes[0] broadcast a stale tx
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
mine_transaction(&nodes[0], &revoked_local_txn[0]);
get_announce_close_broadcast_events(&nodes, 0, 1);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
{
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
check_added_monitors!(nodes[0], 1);
confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let mut events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
match events.last().unwrap() {
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
assert_eq!(node_txn.len(), 2);
check_spends!(node_txn[0], commitment_tx[0]);
mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
let commitment_spend =
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 0);
// Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
// Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
mine_transaction(&nodes[1], &commitment_tx[0]);
- check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false);
+ check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
+ &[nodes[2].node.get_our_node_id()], 100000);
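+ // Note: the plain `check_closed_event` helper (no `!`) takes the counterparty ids as a
+ // slice (`&[...]`) alongside the explicit discard-funding bool, unlike the macro form.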
connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
let timeout_tx = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], commitment_tx[0]);
let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 3);
connect_block(&nodes[1], &block);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
// Duplicate the connect_block call since this may happen due to other listeners
// registering new transactions
nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
{
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false);
- check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false);
+ check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false,
+ &[nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false,
+ &[nodes[0].node.get_our_node_id()], 1000000);
}
#[test]
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3);
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);
mine_transaction(&nodes[1], &node_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
mine_transaction(&nodes[1], &node_txn[0]);
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
mine_transaction(&nodes[1], &node_txn[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
mine_transaction(&nodes[1], &node_txn[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[1], our_payment_hash, false);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(revoked_htlc_txn.len(), 1);
connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
assert_eq!(c_txn.len(), 1);
// Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
mine_transaction(&nodes[1], &commitment_tx[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
// ChannelMonitor: HTLC-Success tx
assert_eq!(b_txn.len(), 1);
mine_transaction(&nodes[1], &commitment_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
let htlc_timeout_tx;
mine_transaction(&nodes[2], &commitment_txn[0]);
check_added_monitors!(nodes[2], 2);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let events = nodes[2].node.get_and_clear_pending_msg_events();
match events[0] {
MessageSendEvent::UpdateHTLCs { .. } => {},
mine_transaction(&nodes[1], &local_txn[0]);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let events = nodes[1].node.get_and_clear_pending_msg_events();
match events[0] {
MessageSendEvent::UpdateHTLCs { .. } => {},
mine_transaction(&nodes[0], &local_txn[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
let htlc_timeout = {
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let htlc_timeout = {
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
mine_transaction(&nodes[0], &closing_tx);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
check_spends!(spend_txn[0], closing_tx);
mine_transaction(&nodes[1], &closing_tx);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
} else {
expect_payment_failed!(nodes[0], our_payment_hash, true);
}
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
check_closed_broadcast!(nodes[1], true).unwrap();
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
+ [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height");
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
}
#[test]
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
if local {
// We fail dust-HTLC 1 by broadcast of local commitment tx
mine_transaction(&nodes[0], &as_commitment_tx[0]);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[0], dust_hash, false);
mine_transaction(&nodes[0], &bs_commitment_tx[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
_ => { panic!(); }
}
} else { panic!(); }
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
// We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
let revoked_htlc_txn = {
});
assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
+ , [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
let penalty_txn = {
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
connect_blocks(&nodes[1], 1);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut);
+ check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(close_ev.len(), 1);
match close_ev[0] {
}
_ => panic!("Unexpected event"),
}
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
_ => panic!("Unexpected event"),
}
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
+ [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let height = HTLC_TIMEOUT_BROADCAST + 1;
connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
check_closed_broadcast(&nodes[0], 1, true);
- check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
+ check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
+ &[nodes[1].node.get_our_node_id()], 100000);
watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
check_added_monitors(&nodes[0], 1);
{
let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
- check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true);
+ check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
+ [nodes[1].node.get_our_node_id(); 2], 100000);
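+ // `[id; 2]` is array-repeat syntax: both expected events close channels with the same peer.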
}
#[test]
chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], TEST_FINAL_CLTV);
let htlc_timeout = {
nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[force_closing_node], true);
check_added_monitors!(nodes[force_closing_node], 1);
- check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
if go_onchain_before_fulfill {
let txn_to_broadcast = match broadcast_alice {
true => alice_txn.clone(),
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
}
}
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
}
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
if broadcast_alice {
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], false);
- check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+ [nodes[1].node.get_our_node_id()], 100000);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 2);
- check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+ check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+ [nodes[1].node.get_our_node_id(); 2], 100000);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
match events[0] {
let expected_err = "funding tx had wrong script/value or output index";
confirm_transaction_at(&nodes[1], &tx, 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
+ [nodes[0].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);
MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
- err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() });
+ err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() },
+ [nodes[0].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
},
nodes[0].node.timer_tick_occurred();
check_outbound_channel_existence(false);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
nodes[1].node.timer_tick_occurred();
check_inbound_channel_existence(false);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
}
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[1].node.list_channels().is_empty());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 8000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 8000000);
}
#[test]
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[1].node.list_channels().is_empty());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
assert!(node_1_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
// Shutdown basically removes the channelDetails, testing of shutdowncomplete state unnecessary
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[1].node.list_channels().is_empty());
check_closed_broadcast!(nodes[0], true);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
assert!(node_1_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
assert!(nodes[0].node.list_channels().is_empty());
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
assert!(nodes[1].node.list_channels().is_empty());
assert!(nodes[2].node.list_channels().is_empty());
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
assert!(nodes[1].node.list_channels().is_empty());
assert!(nodes[2].node.list_channels().is_empty());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 2, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 2, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id(), nodes[2].node.get_our_node_id()], 100000);
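+ // nodes[1] closed two channels here, so it lists one counterparty id per ChannelClosed event.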
+ check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
fn do_test_shutdown_rebroadcast(recv_count: u8) {
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
assert!(node_1_none.is_none());
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
} else {
// If one node, however, received + responded with an identical closing_signed we end
// up erroring and node[0] will try to broadcast its own latest commitment transaction.
// closing_signed so we do it ourselves
check_closed_broadcast!(nodes[1], false);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) },
+ [nodes[0].node.get_our_node_id()], 100000);
}
assert!(nodes[0].node.list_channels().is_empty());
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
assert!(nodes[1].node.list_channels().is_empty());
assert!(nodes[2].node.list_channels().is_empty());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
},
_ => panic!("Unexpected event"),
}
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)".to_string() });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)".to_string() }
+ , [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
if timeout_step == TimeoutStep::NoTimeout {
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.1.unwrap());
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
if timeout_step != TimeoutStep::NoTimeout {
txn[0].output[0].script_pubkey.is_v0_p2wsh()));
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string() }
+ , [nodes[0].node.get_our_node_id()], 100000);
} else {
assert!(txn[0].output[0].script_pubkey.is_v0_p2wpkh());
assert!(txn[0].output[1].script_pubkey.is_v0_p2wpkh());
nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
assert!(node_0_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
assert!(node_0_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}