use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
let mut sender_node_per_peer_lock;
let mut sender_node_peer_state_lock;
- if send_from_initiator {
- let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
- chan.context.holder_selected_channel_reserve_satoshis = 0;
- chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
- } else {
- let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
- chan.context.holder_selected_channel_reserve_satoshis = 0;
- chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+
+ let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+ match channel_phase {
+ ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+ let chan_context = channel_phase.context_mut();
+ chan_context.holder_selected_channel_reserve_satoshis = 0;
+ chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+ },
+ ChannelPhase::Funded(_) => panic!("Unexpected ChannelPhase variant"),
}
}
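The hunks below all repeat the same narrowing step: look up the `ChannelPhase` stored in `channel_by_id` and extract its `Funded` variant. As a minimal sketch, the extraction could be factored into a hypothetical helper (illustration only, not part of this change; the generic bounds assume the signer-provider parameterization used elsewhere in this file):

// Hypothetical helper, for illustration only: narrow a ChannelPhase to its
// Funded variant, yielding None for the unfunded phases.
fn funded<SP: Deref>(phase: &ChannelPhase<SP>) -> Option<&Channel<SP>>
where SP::Target: SignerProvider
{
	if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
}
// Usage: chan_lock.channel_by_id.get(&chan.2).and_then(funded).unwrap()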
let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).unwrap();
let chan_signer = local_chan.get_signer();
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
- let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).unwrap();
let chan_signer = remote_chan.get_signer();
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
let res = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).unwrap();
let local_chan_signer = local_chan.get_signer();
let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).unwrap();
let chan_signer = local_chan.get_signer();
// Make the signer believe we validated another commitment, so we can release the secret
chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
- let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).unwrap();
let chan_signer = remote_chan.get_signer();
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
let res = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).unwrap();
let local_chan_signer = local_chan.get_signer();
let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
commitment_number,
// The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
// well, so HTLCs at exactly the dust limit will not be included in commitment txn.
nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
+ .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
} else { 3000000 };
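To make the comment above concrete: an HTLC is trimmed from the commitment transaction when its value, minus the fee of the transaction that would claim it, falls below the dust limit. A worked sketch with assumed, illustrative numbers (not this test's exact values):

// Illustration only; feerate, weight, and dust limit are assumptions.
let feerate_per_kw: u64 = 253;      // hypothetical commitment feerate, sat per kWU
let htlc_success_weight: u64 = 703; // assumed non-anchor HTLC-success tx weight
let dust_limit_sats: u64 = 546;     // assumed holder_dust_limit_satoshis
let threshold_sats = dust_limit_sats + feerate_per_kw * htlc_success_weight / 1000;
// 546 + 253 * 703 / 1000 = 546 + 177 = 723, so an HTLC of exactly the dust
// limit (546 sat) sits below the threshold and stays off the commitment tx.
assert!(dust_limit_sats < threshold_sats);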
let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
}
+ #[test]
+ fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Let channel_manager get ahead of chain_monitor by one block.
+ // This emulates a race condition where a newly added channel_monitor skips processing a block:
+ // the client calls block_connected on the channel_manager first and then on the chain_monitor.
+ let height_1 = nodes[0].best_block_info().1 + 1;
+ let block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+ nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+ nodes[0].node.block_connected(&block_1, height_1);
+
+ // Create a channel; its channel_monitor gets added to chain_monitor in funding_created.
+ let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+ // Now the newly added channel_monitor in chain_monitor hasn't processed block_1, but its
+ // best_block is block_1, since that was populated by channel_manager, and channel_manager
+ // was running ahead of chain_monitor at the time of funding_created.
+ // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+ // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
+ confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+ connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+ // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+ let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+ }
+
+ #[test]
+ fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Let chain_monitor get ahead of channel_manager by one block.
+ // This emulates a race condition where a newly added channel_monitor skips processing a block:
+ // the client calls block_connected on the chain_monitor first and then on the channel_manager.
+ let height_1 = nodes[0].best_block_info().1 + 1;
+ let block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+ nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+ nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
+
+ // Create a channel; its channel_monitor gets added to chain_monitor in funding_created.
+ let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+ // channel_manager can't really skip block_1; it should get it eventually.
+ nodes[0].node.block_connected(&block_1, height_1);
+
+ // Now the newly added channel_monitor in chain_monitor hasn't processed block_1; its best_block
+ // is the block before block_1, since that was populated by channel_manager, and channel_manager
+ // was running behind at the time of funding_created.
+ // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+ // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
+ confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+ connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+ // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+ let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+ }
+
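Both tests above emulate orderings a real client can produce when it drives the two `chain::Listen` implementations separately. A minimal sketch of the user-facing pattern these races come from, assuming `block` and `height` come from the client's chain source:

use lightning::chain::Listen;
// Both objects consume the same block stream; the tests above cover the
// windows where one of these calls lands before the other around channel
// creation.
channel_manager.block_connected(&block, height);
chain_monitor.block_connected(&block, height);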
#[test]
fn test_drop_messages_peer_disconnect_dual_htlc() {
// Test that we can handle reconnecting when both sides of a channel have pending
assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
// 0th HTLC:
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 1st HTLC:
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
// Fetch a route in advance as we will be unable to once we're unable to send.
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
- htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
+ htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
}
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
// We route 2 dust-HTLCs between A and B
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
{
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
+ let keys = guard.channel_by_id.get_mut(&channel_id).and_then(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).unwrap().get_signer();
const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
- if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
- assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
- assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
- } else { assert!(false); }
+ if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+ if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+ assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+ } else { assert!(false); }
+ } else {
+ assert!(false);
+ }
}
// Our local monitor is in sync and hasn't yet processed the timeout
check_added_monitors!(nodes[0], 1);
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
- if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
- // Watchtower Alice should already have seen the block and reject the update
- assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
- assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
- assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
- } else { assert!(false); }
+ if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+ if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+ // Watchtower Alice should already have seen the block and reject the update
+ assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+ assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+ } else { assert!(false); }
+ } else {
+ assert!(false);
+ }
}
// Our local monitor is in sync and hasn't yet processed the timeout
check_added_monitors!(nodes[0], 1);
// another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
// try to create another channel. Instead, we drop the channel entirely here (leaving the
// channelmanager in a possibly nonsense state instead).
- let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
- let logger = test_utils::TestLogger::new();
- as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+ match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ let logger = test_utils::TestLogger::new();
+ chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+ },
+ _ => panic!("Unexpected ChannelPhase variant"),
+ }
};
check_added_monitors!(nodes[0], 0);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
if on_holder_tx {
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
- chan.context.holder_dust_limit_satoshis = 546;
+ match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ chan.context.holder_dust_limit_satoshis = 546;
+ },
+ _ => panic!("Unexpected ChannelPhase variant"),
+ }
}
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
- (chan.context.get_dust_buffer_feerate(None) as u64,
- chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
+ (chan.context().get_dust_buffer_feerate(None) as u64,
+ chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
};
let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
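For intuition on the expression above, plugging in assumed values (a hypothetical dust buffer feerate, the non-anchor HTLC-timeout weight, and a typical dust limit; not this test's exact numbers):

// Illustration only; all inputs are assumptions.
let dust_buffer_feerate: u64 = 2_000; // sat per kWU, hypothetical
let htlc_timeout_weight: u64 = 663;   // assumed non-anchor HTLC-timeout tx weight
let dust_limit_satoshis: u64 = 546;   // assumed counterparty dust limit
let dust_outbound_htlc_msat =
	(dust_buffer_feerate * htlc_timeout_weight / 1000 + dust_limit_satoshis - 1) * 1000;
assert_eq!(dust_outbound_htlc_msat, 1_871_000); // largest still-dust outbound HTLC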
let check_outbound_channel_existence = |should_exist: bool| {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+ assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
};
// Channel should exist without any timer ticks.
let check_inbound_channel_existence = |should_exist: bool| {
let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
- assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+ assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
};
// Channel should exist without any timer ticks.