use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use util::enforcing_trait_impls::EnforcingSigner;
use util::{byte_utils, test_utils};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
use util::errors::APIError;
use util::ser::{Writeable, ReadableArgs};
use util::config::UserConfig;
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
}
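+ // For reference, a minimal sketch of what the new `check_closed_event!` macro
+ // is assumed to expand to (illustrative only; the real macro lives in
+ // functional_test_utils.rs and the exact expansion is this PR's, not ours):
+ //
+ //   let events = $node.node.get_and_clear_pending_events();
+ //   assert_eq!(events.len(), $count);
+ //   for event in events {
+ //       match event {
+ //           Event::ChannelClosed { ref reason, .. } => assert_eq!(*reason, $reason),
+ //           _ => panic!("Unexpected event"),
+ //       }
+ //   }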
#[test]
send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
}
#[test]
assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
}
#[test]
// Close down the channels...
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
+ check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
}
#[test]
mine_transaction(&nodes[0], &remote_txn[0]);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
// Check we only broadcast 1 timeout tx
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
}
#[test]
check_closed_broadcast!(nodes[0], true);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
// One pending HTLC is discarded by the force-close:
let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
check_closed_broadcast!(nodes[2], true);
assert_eq!(nodes[1].node.list_channels().len(), 0);
assert_eq!(nodes[2].node.list_channels().len(), 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
macro_rules! claim_funds {
($node: expr, $prev_node: expr, $preimage: expr) => {
check_closed_broadcast!(nodes[3], true);
assert_eq!(nodes[2].node.list_channels().len(), 0);
assert_eq!(nodes[3].node.list_channels().len(), 1);
+ check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
// confusing us in the following tests.
assert_eq!(nodes[4].node.list_channels().len(), 0);
nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon);
+ check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
}
#[test]
node_txn.truncate(1);
}
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
// Verify broadcast of revoked HTLC-timeout
let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
// Broadcast revoked HTLC-timeout on node 1
mine_transaction(&nodes[1], &node_txn[1]);
test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
check_added_monitors!(nodes[1], 1);
mine_transaction(&nodes[0], &node_txn[1]);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
}
get_announce_close_broadcast_events(&nodes, 0, 1);
// Inform nodes[1] that nodes[0] broadcast a stale tx
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
// Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
mine_transaction(&nodes[0], &revoked_local_txn[0]);
get_announce_close_broadcast_events(&nodes, 0, 1);
- check_added_monitors!(nodes[0], 1)
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
}
#[test]
{
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[1], payment_hash_2, true);
check_added_monitors!(nodes[0], 1);
confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
check_added_monitors!(nodes[1], 1);
- expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+ match events[1] {
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+ _ => panic!("Unexpected event"),
+ }
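+ // `expect_pending_htlcs_forwardable_from_events!` is assumed to operate on the
+ // caller-supplied event slice rather than draining the queue itself, roughly:
+ //
+ //   assert_eq!($events.len(), 1);
+ //   match $events[0] {
+ //       Event::PendingHTLCsForwardable { .. } => {}
+ //       _ => panic!("Unexpected event"),
+ //   }
+ //   if $process { $node.node.process_pending_htlc_forwards(); }
+ //
+ // which is what lets the trailing `ChannelClosed` event be matched separately above.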
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[1], payment_hash_2, true);
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
assert_eq!(node_txn.len(), 5);
assert_eq!(node_txn[0], node_txn[3]);
added_monitors.clear();
}
let forwarded_events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(forwarded_events.len(), 2);
- if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
- } else { panic!(); }
+ assert_eq!(forwarded_events.len(), 3);
+ match forwarded_events[0] {
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+ _ => panic!("Unexpected event"),
+ }
if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
} else { panic!(); }
+ if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[2] {
+ } else { panic!(); }
let events = nodes[1].node.get_and_clear_pending_msg_events();
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
let commitment_spend =
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 2);
+ assert_eq!(events.len(), 3);
let mut first_claimed = false;
for event in events {
match event {
assert_eq!(payment_preimage, our_payment_preimage_2);
}
},
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
_ => panic!("Unexpected event"),
}
}
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan_2.3);
// Verify that B's ChannelManager is able to detect that the HTLC timed out via its own tx and fails it backward accordingly
connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
mine_transaction(&nodes[1], &commitment_tx[0]);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let timeout_tx;
{
let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx
assert_eq!(node_txn.len(), 2);
check_spends!(node_txn[0], chan_1.3);
let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
+ assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
match events[0] {
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
+ _ => panic!("Unexepected event"),
+ }
+ match events[1] {
Event::PaymentFailed { ref payment_hash, .. } => {
assert_eq!(*payment_hash, fourth_payment_hash);
},
_ => panic!("Unexpected event"),
}
if !deliver_bs_raa {
- match events[1] {
+ match events[2] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
};
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
}
-
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
// Check that Alice fails backward the pending HTLC from the second payment.
- expect_payment_failed!(nodes[0], failed_payment_hash, true);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, failed_payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
+ assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
+ },
+ _ => panic!("Unexpected event {:?}", events[1]),
+ }
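+ // The usual `expect_payment_failed!` shortcut no longer fits here: the pending
+ // event queue now carries the `ChannelClosed` event alongside `PaymentFailed`,
+ // so the two events are drained and matched explicitly above.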
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
}
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 3);
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
// Duplicate the connect_block call since this may happen due to other listeners
// registering new transactions
nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
let tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
{
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
header.prev_blockhash = nodes[0].best_block_hash();
check_added_monitors!(nodes[0], 1);
}
nodes[0].node = &nodes_0_deserialized;
+ check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
// nodes[1] and nodes[2] have no lost state with nodes[0]...
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
nodes[1].node.force_close_channel(&chan.2).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3);
nodes[0].node.force_close_channel(&chan.2).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
mine_transaction(&nodes[1], &node_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
mine_transaction(&nodes[1], &node_txn[0]);
check_spends!(node_txn[2], node_txn[1]);
mine_transaction(&nodes[1], &node_txn[0]);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
mine_transaction(&nodes[1], &node_txn[1]);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[1], our_payment_hash, true);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 2);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(revoked_htlc_txn.len(), 2);
connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
assert_eq!(c_txn.len(), 3);
let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
check_added_monitors!(nodes[1], 1);
- expect_payment_forwarded!(nodes[1], Some(1000), true);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+ assert_eq!(fee_earned_msat, Some(1000));
+ assert_eq!(claim_from_onchain_tx, true);
+ },
+ _ => panic!("Unexpected event"),
+ }
{
let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
// ChannelMonitor: claim tx
check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
b_txn.clear();
}
+ check_added_monitors!(nodes[1], 1);
let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- check_added_monitors!(nodes[1], 1);
match msg_events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexpected event"),
// Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
mine_transaction(&nodes[1], &commitment_tx[0]);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
// ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
assert_eq!(b_txn.len(), 3);
mine_transaction(&nodes[1], &commitment_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
let htlc_timeout_tx;
nodes[2].node.claim_funds(our_payment_preimage);
mine_transaction(&nodes[2], &commitment_txn[0]);
check_added_monitors!(nodes[2], 2);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
let events = nodes[2].node.get_and_clear_pending_msg_events();
match events[0] {
MessageSendEvent::UpdateHTLCs { .. } => {},
check_added_monitors!(nodes[1], 1);
mine_transaction(&nodes[1], &local_txn[0]);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let events = nodes[1].node.get_and_clear_pending_msg_events();
match events[0] {
MessageSendEvent::UpdateHTLCs { .. } => {},
} else {
mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
}
+ let events = nodes[2].node.get_and_clear_pending_events();
+ let close_event = if deliver_last_raa {
+ assert_eq!(events.len(), 2);
+ events[1].clone()
+ } else {
+ assert_eq!(events.len(), 1);
+ events[0].clone()
+ };
+ match close_event {
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+ _ => panic!("Unexpected event"),
+ }
+
connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
check_closed_broadcast!(nodes[2], true);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ if deliver_last_raa {
+ expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+ } else {
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ }
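+ // If the last RAA was delivered, the `PendingHTLCsForwardable` event was
+ // already sitting in the queue drained above (ahead of the close event);
+ // otherwise it only fires once the ANTI_REORG_DELAY blocks connect, so it is
+ // polled fresh here.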
check_added_monitors!(nodes[2], 3);
let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
mine_transaction(&nodes[0], &local_txn[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
let htlc_timeout = {
connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
let htlc_timeout = {
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
mine_transaction(&nodes[0], &closing_tx);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
check_spends!(spend_txn[0], closing_tx);
mine_transaction(&nodes[1], &closing_tx);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
}
fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
}
fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
} else {
expect_payment_failed!(nodes[0], our_payment_hash, true);
}
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
check_closed_broadcast!(nodes[1], true).unwrap();
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}
#[test]
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
if local {
// We fail dust-HTLC 1 by broadcast of local commitment tx
mine_transaction(&nodes[0], &as_commitment_tx[0]);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[0], dust_hash, true);
mine_transaction(&nodes[0], &bs_commitment_tx[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
accept_channel.to_self_delay = 200;
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+ let reason_msg;
if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
match action {
&ErrorAction::SendErrorMessage { ref msg } => {
assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
+ reason_msg = msg.data.clone();
},
- _ => { assert!(false); }
+ _ => { panic!(); }
}
- } else { assert!(false); }
+ } else { panic!(); }
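+ // `panic!()` (rather than `assert!(false)`) diverges, letting the compiler see
+ // that `reason_msg` is initialized on every path that reaches the check below.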
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
// We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
// Check we close channel detecting A is fallen-behind
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
check_added_monitors!(nodes[1], 1);
-
// Check A is able to claim to_remote output
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1);
assert_eq!(node_txn[0].output.len(), 2);
mine_transaction(&nodes[0], &node_txn[0]);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
assert_eq!(spend_txn.len(), 1);
check_spends!(spend_txn[0], node_txn[0]);
connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
- expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+ match events[1] {
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+ _ => panic!("Unexpected event"),
+ }
let first;
let feerate_1;
let penalty_txn;
&msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
}
#[test]
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
let penalty_txn = {
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() });
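+ // `ClosureReason::CounterpartyForceClosed` is expected to carry the remote
+ // peer's error text verbatim in `peer_msg` ("Hi" above).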
}
#[test]
chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
let htlc_timeout = {
nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
check_closed_broadcast!(nodes[force_closing_node], true);
check_added_monitors!(nodes[force_closing_node], 1);
+ check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
if go_onchain_before_fulfill {
let txn_to_broadcast = match broadcast_alice {
true => alice_txn.clone(),
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
}
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], chan_ab.3);
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
}
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
if broadcast_alice {
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], false);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 2);
+ check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
match events[0] {
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
confirm_transaction_at(&nodes[1], &tx, 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
check_added_monitors!(nodes[1], 1);
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
nodes[1].node.force_close_channel(&channel_id).unwrap();
check_closed_broadcast!(nodes[1], true);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);