use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
-use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
+use crate::sign::{EcdsaChannelSigner, EntropySource, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
} else if messages_delivered == 3 {
// nodes[0] still wants its RAA + commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
- reconnect_args.pending_htlc_adds.0 = -1;
+ reconnect_args.pending_responding_commitment_signed.0 = true;
reconnect_args.pending_raa.0 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[0] still wants its commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
- reconnect_args.pending_htlc_adds.0 = -1;
+ reconnect_args.pending_responding_commitment_signed.0 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 5 {
// nodes[1] still wants its final RAA
} else if messages_delivered == 2 {
// nodes[0] still wants its RAA + commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
- reconnect_args.pending_htlc_adds.1 = -1;
+ reconnect_args.pending_responding_commitment_signed.1 = true;
reconnect_args.pending_raa.1 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 3 {
// nodes[0] still wants its commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
- reconnect_args.pending_htlc_adds.1 = -1;
+ reconnect_args.pending_responding_commitment_signed.1 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[1] still wants its final RAA
nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
}
+#[test]
+fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
+	// Regression test: a ChannelMonitor registered while the ChannelManager's best block is
+	// one ahead of the ChainMonitor's must still confirm the funding transaction and emit a
+	// `channel_ready` once enough blocks are connected.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	// No binding below is ever mutated, so plain `let` avoids `unused_mut` warnings.
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Let channel_manager get ahead of chain_monitor by 1 block.
+	// This is to emulate a race condition where a newly added channel_monitor skips processing
+	// 1 block, in case the client calls block_connect on channel_manager first and then on
+	// chain_monitor.
+	let height_1 = nodes[0].best_block_info().1 + 1;
+	let block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+	nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+	nodes[0].node.block_connected(&block_1, height_1);
+
+	// Create channel, and it gets added to chain_monitor in funding_created.
+	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+	// Now, the newly added channel_monitor in chain_monitor hasn't processed block_1, but its
+	// best_block is block_1, since that was populated by channel_manager, and channel_manager
+	// was running ahead of chain_monitor at the time of funding_created.
+	// Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+	// Hence, this channel's channel_monitor skipped block_1 and directly tries to process
+	// subsequent blocks.
+	confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+	// Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+	let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+}
+
+#[test]
+fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
+	// Regression test: a ChannelMonitor registered while the ChainMonitor's best block is one
+	// ahead of the ChannelManager's must still confirm the funding transaction and emit a
+	// `channel_ready` once enough blocks are connected.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	// No binding below is ever mutated, so plain `let` avoids `unused_mut` warnings.
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Let chain_monitor get ahead of channel_manager by 1 block.
+	// This is to emulate a race condition where a newly added channel_monitor skips processing
+	// 1 block, in case the client calls block_connect on chain_monitor first and then on
+	// channel_manager.
+	let height_1 = nodes[0].best_block_info().1 + 1;
+	let block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+	nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+	nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
+
+	// Create channel, and it gets added to chain_monitor in funding_created.
+	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+	// channel_manager can't really skip block_1, it should get it eventually.
+	nodes[0].node.block_connected(&block_1, height_1);
+
+	// Now, the newly added channel_monitor in chain_monitor hasn't processed block_1; its
+	// best_block is the block before block_1, since that was populated by channel_manager, and
+	// channel_manager was running behind at the time of funding_created.
+	// Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+	// Hence, this channel's channel_monitor skipped block_1 and directly tries to process
+	// subsequent blocks.
+	confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+	// Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+	let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+}
+
#[test]
fn test_drop_messages_peer_disconnect_dual_htlc() {
// Test that we can handle reconnecting when both sides of a channel have pending
assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
- expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
+ let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
// If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
if !go_onchain_before_fulfill && broadcast_alice {
let events = nodes[1].node.get_and_clear_pending_msg_events();