//! claim outputs on-chain.
use chain;
-use chain::{Confirm, Listen, Watch, ChannelMonitorUpdateErr};
+use chain::{Confirm, Listen, Watch};
use chain::channelmonitor;
use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use chain::transaction::OutPoint;
send_payment(&nodes[0], &[&nodes[1]], 1000000);
}
-fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
- // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
- // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
- // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
- // the ChannelMonitor tells it to.
- //
- // If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
- // ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
- // duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let persister: test_utils::TestPersister;
- let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
- let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
- let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-
- // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
- // nodes[0].
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
- check_closed_broadcast!(nodes[0], true);
- check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
-
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
- connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(node_txn.len(), 3);
- assert_eq!(node_txn[0], node_txn[1]);
- check_spends!(node_txn[1], funding_tx);
- check_spends!(node_txn[2], node_txn[1]);
- let timeout_txn = vec![node_txn[2].clone()];
-
- assert!(nodes[1].node.claim_funds(payment_preimage));
- check_added_monitors!(nodes[1], 1);
-
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
- check_closed_broadcast!(nodes[1], true);
- check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
- let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-
- header.prev_blockhash = nodes[0].best_block_hash();
- connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
-
- if confirm_commitment_tx {
- connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
- }
-
- header.prev_blockhash = nodes[0].best_block_hash();
- let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { claim_txn } };
-
- if payment_timeout {
- assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
- connect_block(&nodes[0], &claim_block);
- connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
- }
-
- // Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
- // returning TemporaryFailure. This should cause the claim event to never make its way to the
- // ChannelManager.
- chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-
- if payment_timeout {
- connect_blocks(&nodes[0], 1);
- } else {
- connect_block(&nodes[0], &claim_block);
- }
-
- let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
- let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
- .get_mut(&funding_txo).unwrap().drain().collect();
- assert_eq!(mon_updates.len(), 1);
- assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
- assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-
- // If we persist the ChannelManager here, we should get the PaymentSent event after
- // deserialization.
- let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
- if !persist_manager_post_event {
- nodes[0].node.write(&mut chan_manager_serialized).unwrap();
- }
-
- // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
- // payment sent event.
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
- nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]).unwrap();
- if payment_timeout {
- expect_payment_failed!(nodes[0], payment_hash, true);
- } else {
- expect_payment_sent!(nodes[0], payment_preimage);
- }
-
- // If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
- // twice.
- if persist_manager_post_event {
- nodes[0].node.write(&mut chan_manager_serialized).unwrap();
- }
-
- // Now reload nodes[0]...
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let (_, nodes_0_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
- ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
- default_config: Default::default(),
- keys_manager,
- fee_estimator: node_cfgs[0].fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: nodes[0].logger,
- channel_monitors,
- }).unwrap()
- };
- nodes_0_deserialized = nodes_0_deserialized_tmp;
-
- assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
- check_added_monitors!(nodes[0], 1);
- nodes[0].node = &nodes_0_deserialized;
-
- if persist_manager_post_event {
- assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
- } else if payment_timeout {
- expect_payment_failed!(nodes[0], payment_hash, true);
- } else {
- expect_payment_sent!(nodes[0], payment_preimage);
- }
-
- // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
- // which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
- // payment events should kick in, leaving us with no pending events here.
- let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
- nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
- assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-}
-
-#[test]
-fn test_dup_htlc_onchain_fails_on_reload() {
- do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
- do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
- do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
- do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
- do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
- do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
-}
-
#[test]
fn test_manager_serialize_deserialize_events() {
// This test makes sure the events field in ChannelManager survives de/serialization
//! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
//! payments thereafter.
-use chain::{Confirm, Watch};
-use chain::channelmonitor::ChannelMonitor;
+use chain::{ChannelMonitorUpdateErr, Confirm, Listen, Watch};
+use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
+use chain::transaction::OutPoint;
use ln::{PaymentPreimage, PaymentHash};
-use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
+use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
use ln::features::InitFeatures;
use ln::msgs;
use ln::msgs::{ChannelMessageHandler, ErrorAction};
use util::errors::APIError;
use util::enforcing_trait_impls::EnforcingSigner;
use util::ser::{ReadableArgs, Writeable};
+use io;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
-use bitcoin::BlockHash;
+use bitcoin::{Block, BlockHeader, BlockHash};
use prelude::*;
do_retry_with_no_persist(true);
do_retry_with_no_persist(false);
}
+
+fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
+ // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
+ // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
+ // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
+ // the ChannelMonitor tells it to.
+ //
+ // If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
+ // ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
+ // duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+ // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
+ // nodes[0].
+ let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
+ nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
+ check_closed_broadcast!(nodes[0], true);
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+
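+ // Disconnect the peers so that the HTLC fulfill message can never be delivered to nodes[0]
+ // directly; it can only learn of the claim on-chain.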
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
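+ // node_txn should contain the commitment transaction (broadcast twice) followed by the
+ // HTLC-Timeout transaction spending it.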
+ assert_eq!(node_txn.len(), 3);
+ assert_eq!(node_txn[0], node_txn[1]);
+ check_spends!(node_txn[1], funding_tx);
+ check_spends!(node_txn[2], node_txn[1]);
+ let timeout_txn = vec![node_txn[2].clone()];
+
+ assert!(nodes[1].node.claim_funds(payment_preimage));
+ check_added_monitors!(nodes[1], 1);
+
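+ // Confirm nodes[0]'s commitment transaction on nodes[1], prompting its ChannelMonitor to
+ // broadcast the transaction(s) claiming the HTLC with the preimage.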
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
+ check_closed_broadcast!(nodes[1], true);
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+ header.prev_blockhash = nodes[0].best_block_hash();
+ connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
+
+ if confirm_commitment_tx {
+ connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
+ }
+
+ header.prev_blockhash = nodes[0].best_block_hash();
+ let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { claim_txn } };
+
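+ // In the timeout case, confirm the HTLC-Timeout now and bring it to within one block of
+ // ANTI_REORG_DELAY confirmations, so the single block connected below triggers the failure.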
+ if payment_timeout {
+ assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
+ connect_block(&nodes[0], &claim_block);
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
+ }
+
+ // Now connect the HTLC claim transaction (or, in the timeout case, one further block) while the
+ // ChainMonitor-generated ChannelMonitor update returns TemporaryFailure. This should keep the
+ // claim/fail event from reaching the ChannelManager until the monitor update completes.
+ chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+ chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+ if payment_timeout {
+ connect_blocks(&nodes[0], 1);
+ } else {
+ connect_block(&nodes[0], &claim_block);
+ }
+
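+ // The chain-sync-driven monitor persistence should still be pending; grab the pending update so
+ // we can complete it below, and check that no payment event has leaked out yet.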
+ let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
+ let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
+ .get_mut(&funding_txo).unwrap().drain().collect();
+ assert_eq!(mon_updates.len(), 1);
+ assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+ // If we persist the ChannelManager here, we should get the PaymentSent (or, in the timeout
+ // case, PaymentPathFailed) event after deserialization.
+ let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
+ if !persist_manager_post_event {
+ nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+ }
+
+ // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
+ // payment sent (or failed) event.
+ chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+ get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+ nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]).unwrap();
+ if payment_timeout {
+ expect_payment_failed!(nodes[0], payment_hash, true);
+ } else {
+ expect_payment_sent!(nodes[0], payment_preimage);
+ }
+
+ // If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
+ // twice.
+ if persist_manager_post_event {
+ nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+ }
+
+ // Now reload nodes[0]...
+ persister = test_utils::TestPersister::new();
+ let keys_manager = &chanmon_cfgs[0].keys_manager;
+ new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+ nodes[0].chain_monitor = &new_chain_monitor;
+ let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+ let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+ &mut chan_0_monitor_read, keys_manager).unwrap();
+ assert!(chan_0_monitor_read.is_empty());
+
+ let (_, nodes_0_deserialized_tmp) = {
+ let mut channel_monitors = HashMap::new();
+ channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+ <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
+ ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
+ default_config: Default::default(),
+ keys_manager,
+ fee_estimator: node_cfgs[0].fee_estimator,
+ chain_monitor: nodes[0].chain_monitor,
+ tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+ logger: nodes[0].logger,
+ channel_monitors,
+ }).unwrap()
+ };
+ nodes_0_deserialized = nodes_0_deserialized_tmp;
+
+ assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node = &nodes_0_deserialized;
+
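+ // If the ChannelManager was persisted after the event was handled, the event must not be
+ // replayed on reload; otherwise it should now surface exactly once.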
+ if persist_manager_post_event {
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ } else if payment_timeout {
+ expect_payment_failed!(nodes[0], payment_hash, true);
+ } else {
+ expect_payment_sent!(nodes[0], payment_preimage);
+ }
+
+ // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
+ // which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
+ // payment events should kick in, leaving us with no pending events here.
+ let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
+ nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+}
+
+#[test]
+fn test_dup_htlc_onchain_fails_on_reload() {
+ do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
+ do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
+ do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
+ do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
+ do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
+ do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
+}