use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use ln::features::{InitFeatures, NodeFeatures};
use ln::router::Route;
use ln::msgs;
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentPreimage(pub [u8;32]);
-type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>);
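+/// The return type of `Channel::force_shutdown`: the channel's funding outpoint (if it was
+/// ever set), a `ChannelMonitorUpdate` which force-closes the channel's monitor, and any
+/// HTLCs which should be failed backwards.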
+type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
if let Some(short_id) = chan.get_short_channel_id() {
$channel_state.short_to_id.remove(&short_id);
}
- break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+ break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
},
Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
}
}
if let Some(short_id) = chan.get_short_channel_id() {
$channel_state.short_to_id.remove(&short_id);
}
- return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+ return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
},
Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
log_error!($self, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
ChannelMonitorUpdateErr::TemporaryFailure => {},
}
}
- let mut shutdown_res = chan.force_shutdown();
- if shutdown_res.0.len() >= 1 {
- log_error!($self, "You have a toxic local commitment transaction {} avaible in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take", shutdown_res.0[0].txid());
- }
- shutdown_res.0.clear();
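+ // Our local commitment transaction may be toxic at this point; pass `false` so
+ // force_shutdown does not broadcast it.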
+ let shutdown_res = chan.force_shutdown(false);
return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
}
}
// splitting hairs we'd prefer to claim payments that were to us, but we haven't
// given up the preimage yet, so might as well just wait until the payment is
// retried, avoiding the on-chain fees.
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+ let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
res
},
ChannelMonitorUpdateErr::TemporaryFailure => {
#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
- let (local_txn, mut failed_htlcs) = shutdown_res;
- log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len());
+ let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+ log_trace!(self, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- for tx in local_txn {
- log_trace!(self, "Broadcast onchain {}", log_tx!(tx));
- self.tx_broadcaster.broadcast_transaction(&tx);
+ if let Some(funding_txo) = funding_txo_option {
+ // There isn't anything we can do if we get an update failure - we're already
+ // force-closing. The monitor update on the required in-memory copy should broadcast
+ // the latest local state, which is the best we can do anyway. Thus, it is safe to
+ // ignore the result here.
+ let _ = self.monitor.update_monitor(funding_txo, monitor_update);
}
}
}
};
log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..]));
- self.finish_force_close_channel(chan.force_shutdown());
+ self.finish_force_close_channel(chan.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
Some(mut chan) => {
(chan.get_outbound_funding_created(funding_txo)
.map_err(|e| if let ChannelError::Close(msg) = e {
- MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
} else { unreachable!(); })
, chan)
},
ChannelMonitorUpdateErr::PermanentFailure => {
{
let mut channel_state = self.channel_state.lock().unwrap();
- match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None)), chan.get_their_node_id(), channel_state) {
+ match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(true), None)), chan.get_their_node_id(), channel_state) {
Err(_) => { return; },
Ok(()) => unreachable!(),
}
if let Some(short_id) = channel.get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(), self.get_channel_update(&channel).ok()))
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
},
ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
};
// channel, not the temporary_channel_id. This is compatible with ourselves, but the
// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
// any messages referencing a previously-closed channel anyway.
- return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+ return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(true), None));
},
ChannelMonitorUpdateErr::TemporaryFailure => {
// There's no problem signing a counterparty's funding transaction if our monitor
// It looks like our counterparty went on-chain. We go ahead and
// broadcast our latest local state as well here, just in case it's
// some kind of SPV attack, though we expect these to be dropped.
- failed_channels.push(channel.force_shutdown());
+ failed_channels.push(channel.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
if let Some(short_id) = channel.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- failed_channels.push(channel.force_shutdown());
// If would_broadcast_at_height() is true, the channel_monitor will broadcast
// the latest local tx for us, so we should skip that here (it doesn't really
// hurt anything, but does make tests a bit simpler).
- failed_channels.last_mut().unwrap().0 = Vec::new();
+ failed_channels.push(channel.force_shutdown(false));
if let Ok(update) = self.get_channel_update(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
if let Some(short_id) = v.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- failed_channels.push(v.force_shutdown());
+ failed_channels.push(v.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&v) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- failed_channels.push(chan.force_shutdown());
+ failed_channels.push(chan.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
let latest_block_height: u32 = Readable::read(reader)?;
let last_block_hash: Sha256dHash = Readable::read(reader)?;
- let mut closed_channels = Vec::new();
+ let mut failed_htlcs = Vec::new();
let channel_count: u64 = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() ||
channel.get_latest_monitor_update_id() != monitor.get_latest_update_id() {
- let mut force_close_res = channel.force_shutdown();
- force_close_res.0 = monitor.get_latest_local_commitment_txn();
- closed_channels.push(force_close_res);
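+ // The channel is out of sync with its monitor (per the checks above): force-close it,
+ // collect its HTLCs to fail backwards, and have the monitor broadcast the latest local
+ // commitment transaction.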
+ let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+ failed_htlcs.append(&mut new_failed_htlcs);
+ monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
} else {
if let Some(short_channel_id) = channel.get_short_channel_id() {
short_to_id.insert(short_channel_id, channel.channel_id());
for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
if !funding_txo_set.contains(funding_txo) {
- closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
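+ // A monitor with no corresponding open channel belongs to a closed channel; just make
+ // sure its latest local commitment transaction makes it on-chain.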
+ monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
}
}
default_configuration: args.default_config,
};
- for close_res in closed_channels.drain(..) {
- channel_manager.finish_force_close_channel(close_res);
- //TODO: Broadcast channel update for closed channels, but only after we've made a
- //connection or two.
+ for htlc_source in failed_htlcs.drain(..) {
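+ // 0x4000 | 8 is PERM|permanent_channel_failure per BOLT 4.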
+ channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
+ //TODO: Broadcast channel update for closed channels, but only after we've made a
+ //connection or two.
+
Ok((last_block_hash.clone(), channel_manager))
}
}
// Should produce an error.
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed);
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
-
- //clear the message we could not handle
- nodes[1].node.get_and_clear_pending_msg_events();
+ check_added_monitors!(nodes[1], 1);
+ check_closed_broadcast!(nodes[1], true);
}
#[test]
// checks it, but in this case nodes[0] didn't ever get a chance to receive a
// closing_signed so we do it ourselves
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
}
assert!(nodes[0].node.list_channels().is_empty());
let header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
+ check_added_monitors!(nodes[0], 1);
// Check we only broadcast 1 timeout tx
let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(nodes[1].node.list_channels().len(), 1);
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote HTLC add would put them over their reserve value");
+ check_added_monitors!(nodes[1], 1);
return;
}
}
// Simple case with no pending HTLCs:
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+ check_added_monitors!(nodes[1], 1);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+ check_added_monitors!(nodes[0], 1);
test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
}
get_announce_close_broadcast_events(&nodes, 0, 1);
// Simple case of one pending HTLC to HTLC-Timeout
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
+ check_added_monitors!(nodes[1], 1);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+ check_added_monitors!(nodes[2], 1);
test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
}
get_announce_close_broadcast_events(&nodes, 1, 2);
// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+ check_added_monitors!(nodes[2], 1);
let node2_commitment_txid;
{
let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[3].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);
+ check_added_monitors!(nodes[3], 1);
check_preimage_claim(&nodes[3], &node_txn);
}
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[3].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
}
+ check_added_monitors!(nodes[3], 1);
// Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer.
{
nodes[4].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
}
+ check_added_monitors!(nodes[4], 1);
test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
node_txn.swap_remove(0);
node_txn.truncate(1);
}
+ check_added_monitors!(nodes[1], 1);
test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
// Verify broadcast of revoked HTLC-timeout
let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ check_added_monitors!(nodes[0], 1);
// Broadcast revoked HTLC-timeout on node 1
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
check_spends!(node_txn[0], revoked_local_txn[0]);
node_txn.swap_remove(0);
}
+ check_added_monitors!(nodes[0], 1);
test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ check_added_monitors!(nodes[1], 1);
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
}
// Inform nodes[1] that nodes[0] broadcast a stale tx
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
// Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
get_announce_close_broadcast_events(&nodes, 0, 1);
+ check_added_monitors!(nodes[0], 1);
}
#[test]
{
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ check_added_monitors!(nodes[0], 1);
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ check_added_monitors!(nodes[1], 1);
connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
let events = nodes[1].node.get_and_clear_pending_events();
{
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+ check_added_monitors!(nodes[0], 1);
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+ check_added_monitors!(nodes[1], 1);
connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
let events = nodes[1].node.get_and_clear_pending_events();
nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
check_closed_broadcast!(nodes[2], false);
+ check_added_monitors!(nodes[2], 1);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 4 (2*2 * HTLC-Success tx)
assert_eq!(node_txn.len(), 7);
assert_eq!(node_txn[0], node_txn[3]);
// Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
nodes[1].block_notifier.block_connected(&Block { header, txdata: node_txn}, 1);
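+ // Equivalent to check_added_monitors!(nodes[1], 1), but also checks that the monitor
+ // update was for chan_2's funding transaction.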
+ {
+ let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
+ added_monitors.clear();
+ }
let events = nodes[1].node.get_and_clear_pending_msg_events();
{
let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
check_spends!(commitment_tx[0], chan_1.3);
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 1 (HTLC-Success)
assert_eq!(node_txn.len(), 4);
check_spends!(node_txn[0], commitment_tx[0]);
// Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
let mut first_claimed = false;
};
nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
check_closed_broadcast!(nodes[2], false);
+ check_added_monitors!(nodes[2], 1);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan_2.3);
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![timeout_tx]}, 1);
connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
- check_added_monitors!(nodes[1], 0);
+ check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], false);
expect_pending_htlcs_forwardable!(nodes[1]);
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 timeout tx
assert_eq!(node_txn.len(), 3);
check_spends!(node_txn[0], commitment_tx[0]);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
- check_added_monitors!(nodes[1], 0);
+ check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], false);
expect_pending_htlcs_forwardable!(nodes[1]);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ check_added_monitors!(nodes[1], 1);
connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
let events = nodes[1].node.get_and_clear_pending_events();
route_payment(&nodes[0], &[&nodes[1]], 10000000);
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 2);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}, 1);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
// Duplicate the block_connected call since this may happen due to other listeners
// registering new transactions
nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
check_closed_broadcast!(nodes[2], false);
+ check_added_monitors!(nodes[2], 1);
let tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
{
height -= 1;
}
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
let channel_state = nodes[0].node.channel_state.lock().unwrap();
assert_eq!(channel_state.by_id.len(), 0);
assert_eq!(channel_state.short_to_id.len(), 0);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::supported(), InitFeatures::supported());
nodes[1].node.force_close_channel(&chan.2);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::supported(), InitFeatures::supported());
nodes[0].node.force_close_channel(&chan.2);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
let spend_txn = check_spendable_outputs!(nodes[1], 1);
assert_eq!(spend_txn.len(), 2);
assert_eq!(spend_txn[0], spend_txn[1]);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
let spend_txn = check_spendable_outputs!(nodes[1], 1);
assert!(nodes[1].node.claim_funds(payment_preimage, 3_000_000));
check_added_monitors!(nodes[1], 1);
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()] }, 1);
+ check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
match events[0] {
MessageSendEvent::UpdateHTLCs { .. } => {},
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 2);
// A will generate HTLC-Timeout from revoked commitment tx
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(revoked_htlc_txn.len(), 3);
// B will generate justice tx from A's revoked commitment/HTLC tx
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 4); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-timeout, adjusted justice tx, ChannelManager: local commitment tx
// B will generate HTLC-Success from revoked commitment tx
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(revoked_htlc_txn.len(), 3);
// A will generate justice tx from B's revoked commitment/HTLC tx
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
check_closed_broadcast!(nodes[2], false);
+ check_added_monitors!(nodes[2], 1);
let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
assert_eq!(c_txn.len(), 4);
assert_ne!(b_txn[2].lock_time, 0); // Timeout tx
b_txn.clear();
}
+ check_added_monitors!(nodes[1], 1);
let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
check_added_monitors!(nodes[1], 1);
match msg_events[0] {
assert_eq!(b_txn[0].lock_time, 0); // Success tx
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
}
#[test]
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
let htlc_timeout_tx;
{ // Extract one of the two HTLC-Timeout transaction
nodes[2].node.claim_funds(our_payment_preimage, 900_000);
nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
- check_added_monitors!(nodes[2], 2);
+ check_added_monitors!(nodes[2], 3);
let events = nodes[2].node.get_and_clear_pending_msg_events();
match events[0] {
MessageSendEvent::UpdateHTLCs { .. } => {},
check_added_monitors!(nodes[1], 1);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![local_txn[0].clone()] }, 1);
+ check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
match events[0] {
MessageSendEvent::UpdateHTLCs { .. } => {},
connect_blocks(&nodes[2].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
check_closed_broadcast!(nodes[2], false);
expect_pending_htlcs_forwardable!(nodes[2]);
- check_added_monitors!(nodes[2], 2);
+ check_added_monitors!(nodes[2], 3);
let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
assert_eq!(cs_msgs.len(), 2);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![local_txn[0].clone()] }, 200);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn[0].input.len(), 1);
}
test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
}
fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
}
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
}
fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
if !check_revoke_no_close {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
} else {
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
check_closed_broadcast!(nodes[1], true).unwrap();
+ check_added_monitors!(nodes[1], 1);
}
#[test]
assert!(nodes[1].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote side tried to send less than our minimum HTLC value");
+ check_added_monitors!(nodes[1], 1);
}
#[test]
assert!(nodes[1].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote HTLC add would put them over their reserve value");
+ check_added_monitors!(nodes[1], 1);
}
#[test]
assert!(nodes[1].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote tried to push more than our max accepted HTLCs");
+ check_added_monitors!(nodes[1], 1);
}
#[test]
assert!(nodes[1].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data,"Remote HTLC add would put them over our max HTLC value");
+ check_added_monitors!(nodes[1], 1);
}
#[test]
assert!(nodes[1].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
+ check_added_monitors!(nodes[1], 1);
}
#[test]
assert!(nodes[1].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote skipped HTLC ID");
+ check_added_monitors!(nodes[1], 1);
}
#[test]
assert!(nodes[0].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed");
+ check_added_monitors!(nodes[0], 1);
}
#[test]
assert!(nodes[0].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed");
+ check_added_monitors!(nodes[0], 1);
}
#[test]
assert!(nodes[0].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed");
+ check_added_monitors!(nodes[0], 1);
}
#[test]
assert!(nodes[0].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
+ check_added_monitors!(nodes[0], 1);
}
#[test]
assert!(nodes[0].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Remote tried to fulfill HTLC with an incorrect preimage");
+ check_added_monitors!(nodes[0], 1);
}
-
#[test]
fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
//BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
assert!(nodes[0].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
+ check_added_monitors!(nodes[0], 1);
}
#[test]
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![as_prev_commitment_tx[0].clone()]}, 1);
}
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
if local {
// We fail dust-HTLC 1 by broadcast of local commitment tx
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![as_commitment_tx[0].clone()]}, 1);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
let parent_hash = connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
} else {
// We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![bs_commitment_tx[0].clone()]}, 1);
+ check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
let parent_hash = connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
// Test that we enforce upfront_scriptpubkey: if the peer provides a different one at closing, we disconnect
nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
- let events = nodes[2].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 2);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
- if let MessageSendEvent::HandleError { ref action, .. } = events[1] {
- match action {
- &ErrorAction::SendErrorMessage { ref msg } => {
- assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
- },
- _ => { assert!(false); }
- }
- } else { assert!(false); }
+ assert_eq!(check_closed_broadcast!(nodes[2], true).unwrap().data, "Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
+ check_added_monitors!(nodes[2], 1);
// We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
// Check that we update the monitor upon learning of per_commitment_point from B
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
- check_added_monitors!(nodes[0], 1);
+ check_added_monitors!(nodes[0], 2);
{
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
// Check that we close the channel upon detecting that A has fallen behind
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 2);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
- match events [1] {
- MessageSendEvent::HandleError { ref action, .. } => {
- match action {
- &ErrorAction::SendErrorMessage { ref msg } => {
- assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
- _ => panic!("Unexpected event!"),
- }
- },
- _ => panic!("Unexpected event"),
- }
+ assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
+ check_added_monitors!(nodes[1], 1);
+
// Check A is able to claim to_remote output
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_txn[0].clone()] }, 115);
+ check_added_monitors!(nodes[1], 1);
// One or more justice tx should have been broadcast, check it
let penalty_1;
// B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(revoked_htlc_txn.len(), 6);
node_txn.clear();
}
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
}
#[test]
nodes[1].node.claim_funds(payment_preimage, 3_000_000);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
- check_added_monitors!(nodes[1], 1);
+ check_added_monitors!(nodes[1], 2);
// One or more claim tx should have been broadcast, check it
let timeout;
// Connect blocks on node A commitment transaction
let header = BlockHeader { version: 0x20000000, prev_blockhash: prev_header_100, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 101);
+ check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
// Verify node A broadcast tx claiming both HTLCs
{
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn[0].input.len(), 2);
node_txn.clear();
}
- nodes[0].node.get_and_clear_pending_msg_events();
// Connect blocks on node B
connect_blocks(&nodes[1].block_notifier, 135, 0, false, Default::default());
+ check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
// Verify node B broadcast 2 HTLC-timeout txn
let partial_claim_tx = {
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn[2].input.len(), 1);
node_txn[1].clone()
};
- nodes[1].node.get_and_clear_pending_msg_events();
// Broadcast partial claim on node A, should regenerate a claiming tx with HTLC dropped
let header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
&msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
+ check_added_monitors!(nodes[1], 1);
}
#[test]
let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone()] }, 129);
check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
let penalty_txn = {
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx