use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
-use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider};
+use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
-use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
+use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::ChainHash;
-use bitcoin::network::constants::Network;
-use bitcoin::{Sequence, Transaction, TxIn, TxOut, Witness};
+use bitcoin::network::Network;
+use bitcoin::{Amount, Sequence, Transaction, TxIn, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;
+use bitcoin::transaction::Version;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey,SecretKey};
-use regex;
-
use crate::io;
use crate::prelude::*;
use alloc::collections::BTreeSet;
-use core::default::Default;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex, RwLock};
use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;
+#[test]
+fn test_channel_resumption_fail_post_funding() {
+ // If we fail to exchange funding with a peer prior to it disconnecting we'll resume the
+ // channel open on reconnect. However, if we do exchange funding we do not currently support
+ // replaying it, so here we test that the channel closes.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 0, 42, None, None).unwrap();
+ let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan);
+ let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan);
+
+ let (temp_chan_id, tx, funding_output) =
+ create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+ let new_chan_id = ChannelId::v1_from_funding_outpoint(funding_output);
+ nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx).unwrap();
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)]);
+
+ // After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that
+ // explicitly here.
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
+}
+
#[test]
fn test_insane_channel_opens() {
// Stand up a network of 2 nodes
chan_context.holder_selected_channel_reserve_satoshis = 0;
chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
},
- ChannelPhase::Funded(_) => assert!(false),
+ _ => assert!(false),
}
}
//We made sure neither party's funds are below the dust limit and there are no HTLCs here
assert_eq!(commitment_tx.output.len(), 2);
let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
- let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
+ let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat());
actual_fee = channel_value - actual_fee;
assert_eq!(total_fee, actual_fee);
}
send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
// Close down the channels...
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
let mut has_both_htlcs = 0; // check htlcs match ones committed
for outp in remote_txn[0].output.iter() {
- if outp.value == 800_000 / 1000 {
+ if outp.value.to_sat() == 800_000 / 1000 {
has_both_htlcs += 1;
- } else if outp.value == 900_000 / 1000 {
+ } else if outp.value.to_sat() == 900_000 / 1000 {
has_both_htlcs += 1;
}
}
assert_eq!(preimage_tx.input.len(), 1);
assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
- assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800);
+ assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), 800);
assert_eq!(timeout_tx.input.len(), 1);
assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
check_spends!(timeout_tx, remote_txn[0]);
- assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900);
+ assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value.to_sat(), 900);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 3);
let secp_ctx = Secp256k1::new();
let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
- let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+ let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
+ let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
- 3460001, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
+ 3460001, &recipient_onion_fields, cur_height, &None).unwrap();
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
let msg = msgs::UpdateAddHTLC {
channel_id: chan.2,
// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
let secp_ctx = Secp256k1::new();
let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
- let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+ let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
+ let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
- 700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
+ 700_000, &recipient_onion_fields, cur_height, &None).unwrap();
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
let msg = msgs::UpdateAddHTLC {
channel_id: chan.2,
// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
let secp_ctx = Secp256k1::new();
let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
- let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+ let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
+ let recipient_onion_fields = RecipientOnionFields::spontaneous_empty();
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
- &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
+ &route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None).unwrap();
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
let msg = msgs::UpdateAddHTLC {
channel_id: chan.2,
assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
assert_eq!(via_channel_id, Some(chan_2.2));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(our_payment_secret_21, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
assert_eq!(via_channel_id, Some(chan_2.2));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(our_payment_secret_22, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
// Simple case with no pending HTLCs:
- nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
// broadcasted until we reach the timelock time).
- nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
{
// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
- nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors!(nodes[2], 1);
check_closed_broadcast!(nodes[2], true);
let node2_commitment_txid;
connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
let events = nodes[3].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
- let close_chan_update_1 = match events[0] {
+ let close_chan_update_1 = match events[1] {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
- match events[1] {
+ match events[0] {
MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
assert_eq!(node_id, nodes[4].node.get_our_node_id());
},
connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
let events = nodes[4].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
- let close_chan_update_2 = match events[0] {
+ let close_chan_update_2 = match events[1] {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
- match events[1] {
+ match events[0] {
MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
assert_eq!(node_id, nodes[3].node.get_our_node_id());
},
}
check_added_monitors!(nodes[4], 1);
test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
- check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
mine_transaction(&nodes[4], &node_txn[0]);
check_preimage_claim(&nodes[4], &node_txn);
assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
Ok(ChannelMonitorUpdateStatus::Completed));
- check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
}
#[test]
fn test_justice_tx_htlc_timeout() {
// Test justice txn built on revoked HTLC-Timeout tx, against both sides
- let mut alice_config = UserConfig::default();
+ let mut alice_config = test_default_channel_config();
alice_config.channel_handshake_config.announced_channel = true;
alice_config.channel_handshake_limits.force_announced_channel_preference = false;
alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
- let mut bob_config = UserConfig::default();
+ let mut bob_config = test_default_channel_config();
bob_config.channel_handshake_config.announced_channel = true;
bob_config.channel_handshake_limits.force_announced_channel_preference = false;
bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
#[test]
fn test_justice_tx_htlc_success() {
// Test justice txn built on revoked HTLC-Success tx, against both sides
- let mut alice_config = UserConfig::default();
+ let mut alice_config = test_default_channel_config();
alice_config.channel_handshake_config.announced_channel = true;
alice_config.channel_handshake_limits.force_announced_channel_preference = false;
alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
- let mut bob_config = UserConfig::default();
+ let mut bob_config = test_default_channel_config();
bob_config.channel_handshake_config.announced_channel = true;
bob_config.channel_handshake_limits.force_announced_channel_preference = false;
bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
}
});
// On the first commitment, node[1]'s balance was below dust so it didn't have an output
- let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value };
- let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value;
+ let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value.to_sat() };
+ let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value.to_sat();
assert_eq!(total_claimable_balance, expected_claimable_balance);
}
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let mut events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+ expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
check_spends!(node_txn[1], commitment_tx[0]);
assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
- assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
- assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output
+ assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output
assert_eq!(node_txn[0].lock_time, LockTime::ZERO);
assert_eq!(node_txn[1].lock_time, LockTime::ZERO);
if $htlc_offered {
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
- assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
- assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output
+ assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output
} else {
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
- assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
- assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment
+ assert!(node_txn[1].output[0].script_pubkey.is_p2wpkh()); // direct payment
}
node_txn.clear();
} }
assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1);
- assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ assert!(commitment_spend.output[0].script_pubkey.is_p2wpkh()); // direct payment
// We don't bother to check that B can claim the HTLC output on its commitment tx here as
// we already checked the same situation with A.
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
- Event::PendingHTLCsForwardable { .. } => { },
- _ => panic!("Unexpected event"),
- };
- match events[1] {
Event::HTLCHandlingFailed { .. } => { },
_ => panic!("Unexpected event"),
}
+ match events[1] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
// Deliberately don't process the pending fail-back so they all fail back at once after
// block connection just like the !deliver_bs_raa case
}
- let mut failed_htlcs = HashSet::new();
+ let mut failed_htlcs = new_hash_set();
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
mine_transaction(&nodes[1], &revoked_local_txn[0]);
let secp_ctx = Secp256k1::new();
let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
- let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+ let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
+ let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
- &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
+ &route.paths[0], 50_000, &recipient_onion_fields, current_height, &None).unwrap();
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
return;
}
let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
-
+ let error_message = "Channel force-closed";
route_payment(&nodes[0], &[&nodes[1]], 10000000);
- nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
// nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
// state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC
// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
-
- nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
#[test]
fn test_peer_disconnected_before_funding_broadcasted() {
// Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
- // before the funding transaction has been broadcasted.
+ // before the funding transaction has been broadcasted and doesn't reconnect within the allowed time.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
}
- // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
- // disconnected before the funding transaction was broadcasted.
+ // The peers disconnect before the funding is broadcasted.
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ // The time allowed for the peers to reconnect expires.
+ for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+ nodes[0].node.timer_tick_occurred();
+ }
+
+ // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` and a
+ // `DiscardFunding` event when the peers are disconnected and do not reconnect before the
+ // funding transaction is broadcasted.
check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
, [nodes[1].node.get_our_node_id()], 1000000);
check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
+ claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_3)
+ .skip_last(true)
+ );
fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_1, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
assert_eq!(payment_hash_2, *payment_hash);
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_2, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
- nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let error_message = "Channel force-closed";
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
- nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
MessageSendEvent::UpdateHTLCs { .. } => {},
_ => panic!("Unexpected event"),
}
- match events[1] {
+ match events[2] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexepected event"),
}
mine_transaction(&nodes[1], &commitment_tx[0]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
- match events[0] {
+ match events[1] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexpected event"),
}
assert_eq!(c_txn.len(), 1);
check_spends!(c_txn[0], commitment_tx[0]);
assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
- assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert!(c_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output
assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx
// So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor
assert_eq!(b_txn.len(), 1);
check_spends!(b_txn[0], commitment_tx[0]);
assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
- assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ assert!(b_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment
assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx
check_closed_broadcast!(nodes[1], true);
// (with value 900 sats) will be claimed in the below `claim_funds` call.
if node_txn.len() > 2 {
assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
- htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
+ htlc_timeout_tx = if node_txn[2].output[0].value.to_sat() < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
} else {
- htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
+ htlc_timeout_tx = if node_txn[0].output[0].value.to_sat() < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
}
}
MessageSendEvent::UpdateHTLCs { .. } => {},
_ => panic!("Unexpected event"),
}
- match events[1] {
+ match events[2] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexepected event"),
}
MessageSendEvent::UpdateHTLCs { .. } => {},
_ => panic!("Unexpected event"),
}
- match events[1] {
+ match events[2] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexepected event"),
}
connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
check_closed_broadcast!(nodes[2], true);
if deliver_last_raa {
- expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+ expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
let as_events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
- let mut as_failds = HashSet::new();
+ let mut as_faileds = new_hash_set();
let mut as_updates = 0;
for event in as_events.iter() {
if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
- assert!(as_failds.insert(*payment_hash));
+ assert!(as_faileds.insert(*payment_hash));
if *payment_hash != payment_hash_2 {
assert_eq!(*payment_failed_permanently, deliver_last_raa);
} else {
} else if let &Event::PaymentFailed { .. } = event {
} else { panic!("Unexpected event"); }
}
- assert!(as_failds.contains(&payment_hash_1));
- assert!(as_failds.contains(&payment_hash_2));
+ assert!(as_faileds.contains(&payment_hash_1));
+ assert!(as_faileds.contains(&payment_hash_2));
if announce_latest {
- assert!(as_failds.contains(&payment_hash_3));
- assert!(as_failds.contains(&payment_hash_5));
+ assert!(as_faileds.contains(&payment_hash_3));
+ assert!(as_faileds.contains(&payment_hash_5));
}
- assert!(as_failds.contains(&payment_hash_6));
+ assert!(as_faileds.contains(&payment_hash_6));
let bs_events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
- let mut bs_failds = HashSet::new();
+ let mut bs_faileds = new_hash_set();
let mut bs_updates = 0;
for event in bs_events.iter() {
if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
- assert!(bs_failds.insert(*payment_hash));
+ assert!(bs_faileds.insert(*payment_hash));
if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
assert_eq!(*payment_failed_permanently, deliver_last_raa);
} else {
} else if let &Event::PaymentFailed { .. } = event {
} else { panic!("Unexpected event"); }
}
- assert!(bs_failds.contains(&payment_hash_1));
- assert!(bs_failds.contains(&payment_hash_2));
+ assert!(bs_faileds.contains(&payment_hash_1));
+ assert!(bs_faileds.contains(&payment_hash_2));
if announce_latest {
- assert!(bs_failds.contains(&payment_hash_4));
+ assert!(bs_faileds.contains(&payment_hash_4));
}
- assert!(bs_failds.contains(&payment_hash_5));
+ assert!(bs_faileds.contains(&payment_hash_5));
// For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
// get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
- let message_router = test_utils::TestMessageRouter::new(network_graph.clone());
+ let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
node_cfgs.remove(0);
let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
mine_transaction(&nodes[0], &closing_tx);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
check_spends!(spend_txn[0], closing_tx);
mine_transaction(&nodes[1], &closing_tx);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
} else {
expect_payment_failed!(nodes[0], our_payment_hash, true);
}
// nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
assert_eq!(process_htlc_forwards_event.len(), 2);
- match &process_htlc_forwards_event[0] {
+ match &process_htlc_forwards_event[1] {
&Event::PendingHTLCsForwardable { .. } => {},
_ => panic!("Unexpected event"),
}
get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
route.paths[0].hops[0].fee_msat = send_amt;
let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
- let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+ let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
+ let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
- &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
+ &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None).unwrap();
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
let mut msg = msgs::UpdateAddHTLC {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ // Connect a dummy node so that future broadcast events are still generated properly
+ connect_dummy_node(&nodes[0]);
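+ // (The dummy peer keeps nodes[0] connected to at least one peer, so it still generates the
+ // channel update broadcast events asserted below.)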
+
create_announced_chan_between_nodes(&nodes, 0, 1);
create_announced_chan_between_nodes(&nodes, 1, 0);
create_announced_chan_between_nodes(&nodes, 0, 1);
}
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- let mut chans_disabled = HashMap::new();
+ let mut chans_disabled = new_hash_map();
for e in msg_events {
match e {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
let mut penalty_sum = 0;
for outp in revoked_txn[0].output.iter() {
- if outp.script_pubkey.is_v0_p2wsh() {
- penalty_sum += outp.value;
+ if outp.script_pubkey.is_p2wsh() {
+ penalty_sum += outp.value.to_sat();
}
}
assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
assert_eq!(node_txn[0].output.len(), 1);
check_spends!(node_txn[0], revoked_txn[0]);
- let fee_1 = penalty_sum - node_txn[0].output[0].value;
+ let fee_1 = penalty_sum - node_txn[0].output[0].value.to_sat();
feerate_1 = fee_1 * 1000 / node_txn[0].weight().to_wu();
penalty_1 = node_txn[0].txid();
node_txn.clear();
penalty_2 = node_txn[0].txid();
- // Verify new bumped tx is different from last claiming transaction, we don't want spurrious rebroadcast
+ // Verify the new bumped tx is different from the last claiming transaction; we don't want a spurious rebroadcast
assert_ne!(penalty_2, penalty_1);
- let fee_2 = penalty_sum - node_txn[0].output[0].value;
+ let fee_2 = penalty_sum - node_txn[0].output[0].value.to_sat();
feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
// Verify 25% bump heuristic
assert!(feerate_2 * 100 >= feerate_1 * 125);
penalty_3 = node_txn[0].txid();
- // Verify new bumped tx is different from last claiming transaction, we don't want spurrious rebroadcast
+ // Verify the new bumped tx is different from the last claiming transaction; we don't want a spurious rebroadcast
assert_ne!(penalty_3, penalty_2);
- let fee_3 = penalty_sum - node_txn[0].output[0].value;
+ let fee_3 = penalty_sum - node_txn[0].output[0].value.to_sat();
feerate_3 = fee_3 * 1000 / node_txn[0].weight().to_wu();
// Verify 25% bump heuristic
assert!(feerate_3 * 100 >= feerate_2 * 125);
let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
- send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
+ let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
assert_eq!(revoked_local_txn[0].input.len(), 1);
let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
connect_block(&nodes[0], &block_129);
let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+ expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
preimage = node_txn[0].txid();
let index = node_txn[0].input[0].previous_output.vout;
- let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
+ let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat();
feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu();
let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
timeout = timeout_tx.txid();
let index = timeout_tx.input[0].previous_output.vout;
- let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
+ let fee = remote_txn[0].output[index as usize].value.to_sat() - timeout_tx.output[0].value.to_sat();
feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu();
node_txn.clear();
check_spends!(preimage_bump, remote_txn[0]);
let index = preimage_bump.input[0].previous_output.vout;
- let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
+ let fee = remote_txn[0].output[index as usize].value.to_sat() - preimage_bump.output[0].value.to_sat();
let new_feerate = fee * 1000 / preimage_bump.weight().to_wu();
assert!(new_feerate * 100 > feerate_timeout * 125);
assert_ne!(timeout, preimage_bump.txid());
let index = node_txn[0].input[0].previous_output.vout;
- let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
+ let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat();
let new_feerate = fee * 1000 / node_txn[0].weight().to_wu();
assert!(new_feerate * 100 > feerate_preimage * 125);
assert_ne!(preimage, node_txn[0].txid());
}
_ => panic!("Unexpected event"),
}
-
- nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(close_msg_ev.len(), 1);
// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
// rejecting the inbound channel request.
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
+ let error_message = "Channel force-closed";
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
- nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
}
_ => panic!("Unexpected event"),
}
let height = nodes[0].best_block_info().1;
let session_priv = SecretKey::from_slice(&session_priv).unwrap();
let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+ let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
- RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
+ &recipient_onion_fields, height + 1, &None).unwrap();
// Edit amt_to_forward to simulate the sender having set
// the final amount and the routing node taking less fee
if let msgs::OutboundOnionPayload::Receive {
let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
- claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
+ claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], expected_paths, our_payment_preimage)
+ );
}
fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
}
- claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
+ claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, our_payment_preimage)
+ );
}
#[test]
route.paths[1].hops[0].short_channel_id = chan_2_id;
route.paths[1].hops[1].short_channel_id = chan_4_id;
send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
- claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+ claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage)
+ );
}
#[test]
match events[0] {
Event::PaymentClaimable { ref purpose, .. } => {
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => {
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
let height = HTLC_TIMEOUT_BROADCAST + 1;
connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
check_closed_broadcast(&nodes[0], 1, true);
- check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+ check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
[nodes[1].node.get_our_node_id()], 100000);
watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
check_added_monitors(&nodes[0], 1);
force_closing_node = 1;
counterparty_node = 0;
}
- nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[force_closing_node], true);
check_added_monitors!(nodes[force_closing_node], 1);
check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
let wit_program_script: ScriptBuf = wit_program.into();
for output in tx.output.iter_mut() {
// Make the confirmed funding transaction have a bogus script_pubkey
- output.script_pubkey = ScriptBuf::new_v0_p2wsh(&wit_program_script.wscript_hash());
+ output.script_pubkey = ScriptBuf::new_p2wsh(&wit_program_script.wscript_hash());
}
nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
- // long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicing
- // as its not 32 bytes long.
+ // long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
+ // as it's not 32 bytes long.
let mut spend_tx = Transaction {
- version: 2i32, lock_time: LockTime::ZERO,
+ version: Version::TWO, lock_time: LockTime::ZERO,
input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
previous_output: BitcoinOutPoint {
txid: tx.txid(),
witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness())
}).collect(),
output: vec![TxOut {
- value: 1000,
+ value: Amount::from_sat(1000),
script_pubkey: ScriptBuf::new(),
}]
};
let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-
- nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
- do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
+ do_claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage)
+ );
expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
}
AtUpdateFeeOutbound,
}
-fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool) {
+fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) {
// Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
// policy.
//
let chanmon_cfgs = create_chanmon_cfgs(2);
let mut config = test_default_channel_config();
+
+ // We hard-code the feerate values here, but they're re-calculated further down and asserted.
+ // If the values below ever change, these constants should simply be updated.
+ const AT_FEE_OUTBOUND_HTLCS: u64 = 20;
+ let nondust_htlc_count_in_limit =
+ if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
+ AT_FEE_OUTBOUND_HTLCS
+ } else { 0 };
+ let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 };
+ let expected_dust_buffer_feerate = initial_feerate + 2530;
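+ // The exposure limits below are padded by the fee cost, at the feerate in excess of the
+ // 253 sat/kw floor, of a commitment transaction carrying `nondust_htlc_count_in_limit`
+ // non-dust HTLCs plus that of their second-stage HTLC transactions.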
+ let mut commitment_tx_cost = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty());
+ commitment_tx_cost +=
+ if on_holder_tx {
+ htlc_success_tx_weight(&ChannelTypeFeatures::empty())
+ } else {
+ htlc_timeout_tx_weight(&ChannelTypeFeatures::empty())
+ } * (initial_feerate as u64 - 253) / 1000 * nondust_htlc_count_in_limit;
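+ // With `apply_excess_fee` unset the excess feerate is zero, so `commitment_tx_cost` is zero
+ // and the limits below reduce to the original 5_000_000 msat values.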
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = initial_feerate;
+ }
config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
// Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
// to get roughly the same initial value as the default setting when this test was
// originally written.
- MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253)
- } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000) }; // initial default setting value
+ MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost) / 253)
+ } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost) };
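+ // In both cases the original 5_000_000 msat dust budget is kept and the non-dust HTLC fee
+ // cost is added on top, leaving the dust-only headroom unchanged.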
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = 253;
+ }
+
// Fetch a route in advance as we will be unable to once we're unable to send.
let (mut route, payment_hash, _, payment_secret) =
get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
(chan.context().get_dust_buffer_feerate(None) as u64,
- chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
+ chan.context().get_max_dust_htlc_exposure_msat(253))
};
+ assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64);
let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
- let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
+ // Subtract 3 sats for the multiplier and 2 sats for the fixed limit to make sure we are 50% below the dust limit.
+ // This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
+ // while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
+ let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
+ // This test was written with a fixed dust value here, which we retain, but assert that it is,
+ // indeed, dust on both transactions.
let dust_htlc_on_counterparty_tx: u64 = 4;
- let dust_htlc_on_counterparty_tx_msat: u64 = max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
+ let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000;
+ let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
+ assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat);
+ assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat);
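+ // Four HTLCs of 1_250_000 msat each fill the 5_000_000 msat base budget exactly, so the
+ // exposure logged in the assertion further down exceeds the limit by just 4 msat.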
if on_holder_tx {
if dust_outbound_balance {
// Outbound dust balance: 5200 sats
nodes[0].logger.assert_log("lightning::ln::channel",
format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
+ dust_htlc_on_counterparty_tx_msat * dust_htlc_on_counterparty_tx + commitment_tx_cost + 4,
max_dust_htlc_exposure_msat), 1);
}
} else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
// For the multiplier dust exposure limit, since it scales with feerate,
// we need to add a lot of HTLCs that will become dust at the new feerate
// to cross the threshold.
- for _ in 0..20 {
+ for _ in 0..AT_FEE_OUTBOUND_HTLCS {
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
nodes[0].node.send_payment_with_route(&route, payment_hash,
RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
added_monitors.clear();
}
-fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) {
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
+fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) {
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
+ if !multiplier_dust_limit && !apply_excess_fee {
+ // Because non-dust HTLC transaction fees are included in the dust exposure, trying to
+ // increase the fee to hit a higher dust exposure with a
+ // `MaxDustHTLCExposure::FeeRateMultiplier` is no longer practical, so we skip these
+ // cases when the multiplier limit or an excess fee is in play.
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
+ }
}
#[test]
fn test_max_dust_htlc_exposure() {
- do_test_max_dust_htlc_exposure_by_threshold_type(false);
- do_test_max_dust_htlc_exposure_by_threshold_type(true);
+ do_test_max_dust_htlc_exposure_by_threshold_type(false, false);
+ do_test_max_dust_htlc_exposure_by_threshold_type(false, true);
+ do_test_max_dust_htlc_exposure_by_threshold_type(true, false);
+ do_test_max_dust_htlc_exposure_by_threshold_type(true, true);
+}
+
+#[test]
+fn test_nondust_htlc_fees_are_dust() {
+ // Test that the transaction fees paid in nondust HTLCs count towards our dust limit
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+ let mut config = test_default_channel_config();
+ // Set the dust limit to the default value
+ config.channel_config.max_dust_htlc_exposure =
+ MaxDustHTLCExposure::FeeRateMultiplier(10_000);
+ // Make sure the HTLC limits don't get in the way
+ config.channel_handshake_limits.min_max_accepted_htlcs = 400;
+ config.channel_handshake_config.our_max_accepted_htlcs = 400;
+ config.channel_handshake_config.our_htlc_minimum_msat = 1;
+
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Create a channel from 1 -> 0 but immediately push all of the funds towards 0
+ let chan_id_1 = create_announced_chan_between_nodes(&nodes, 1, 0).2;
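+ // Drain node 1's balance by repeatedly sending its full next-HTLC limit back to node 0.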
+ while nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat > 0 {
+ send_payment(&nodes[1], &[&nodes[0]], nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat);
+ }
+
+ // First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs
+ // repeatedly until we run out of space.
+ const HTLC_VALUE: u64 = 1_000_000; // Doesn't matter, tune until the test passes
+ let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE).0;
+
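+ // Keep adding dust HTLCs until the channel enforces a nonzero HTLC minimum, i.e. our dust
+ // exposure budget is exhausted.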
+ while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 {
+ route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE);
+ }
+ assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0,
+ "We don't want to run out of ability to send because of some non-dust limit");
+ assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10,
+ "We should be able to fill our dust limit without too many HTLCs");
+
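+ // With the dust budget spent, the enforced minimum is the smallest non-dust HTLC we may
+ // send; capture it before freeing up room by claiming a payment.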
+ let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat;
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+ assert_eq!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0,
+ "Make sure we are able to send once we clear one HTLC");
+
+ // At this point we have somewhere between dust_limit and dust_limit * 2 left in our dust
+ // exposure limit, and we want to max that out using non-dust HTLCs.
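+ // Each non-dust HTLC still contributes its HTLC-transaction fee (the success-path weight at
+ // the 253 sat/kw feerate) to our dust exposure.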
+ let commitment_tx_per_htlc_cost =
+ htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * 253;
+ let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost;
+ assert!(max_htlcs_remaining < 30,
+ "We should be able to fill our dust limit without too many HTLCs");
+ for i in 0..max_htlcs_remaining + 1 {
+ assert_ne!(i, max_htlcs_remaining);
+ if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat < dust_limit {
+ // We found our limit, and it was less than max_htlcs_remaining!
+ // At this point we can only send dust HTLCs as any non-dust HTLCs will overuse our
+ // remaining dust exposure.
+ break;
+ }
+ route_payment(&nodes[0], &[&nodes[1]], dust_limit * 2);
+ }
+
+ // At this point non-dust HTLCs are no longer accepted from node 0 -> 1; we also check that
+ // such HTLCs can't be forwarded over the same channel.
+ create_announced_chan_between_nodes(&nodes, 2, 0);
+ let (route, payment_hash, _, payment_secret) =
+ get_route_and_payment_hash!(nodes[2], nodes[1], dust_limit * 2);
+ let onion = RecipientOnionFields::secret_only(payment_secret);
+ nodes[2].node.send_payment_with_route(&route, payment_hash, onion, PaymentId([0; 32])).unwrap();
+ check_added_monitors(&nodes[2], 1);
+ let send = SendEvent::from_node(&nodes[2]);
+
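+ // Deliver the HTLC to node 0, which should refuse to forward it over the dust-saturated
+ // channel to node 1.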
+ nodes[0].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send.msgs[0]);
+ commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true);
+
+ expect_pending_htlcs_forwardable!(nodes[0]);
+ check_added_monitors(&nodes[0], 1);
+ let node_id_1 = nodes[1].node.get_our_node_id();
+ expect_htlc_handling_failed_destinations!(
+ nodes[0].node.get_and_clear_pending_events(),
+ &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }]
+ );
+
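+ // Node 0 fails the HTLC back to node 2 rather than forwarding it.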
+ let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false);
+ expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new());
}
+
#[test]
fn test_non_final_funding_tx() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
- let best_height = nodes[0].node.best_block.read().unwrap().height();
+ let best_height = nodes[0].node.best_block.read().unwrap().height;
let chan_id = *nodes[0].network_chan_count.borrow();
let events = nodes[0].node.get_and_clear_pending_events();
let mut tx = match events[0] {
Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
// Timelock the transaction _beyond_ the best client height + 1.
- Transaction { version: chan_id as i32, lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
- value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+ Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
+ value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(),
}]}
},
_ => panic!("Unexpected event"),
},
_ => panic!()
}
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::ChannelClosed { channel_id, .. } => {
- assert_eq!(channel_id, temp_channel_id);
- },
- _ => panic!("Unexpected event"),
- }
+ let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned();
+ check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]);
+ assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel");
}
#[test]
let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
- let best_height = nodes[0].node.best_block.read().unwrap().height();
+ let best_height = nodes[0].node.best_block.read().unwrap().height;
let chan_id = *nodes[0].network_chan_count.borrow();
let events = nodes[0].node.get_and_clear_pending_events();
let mut tx = match events[0] {
Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
// Timelock the transaction to the best block height + 1, i.e. just within the allowed headroom.
- Transaction { version: chan_id as i32, lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
- value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+ Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
+ value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(),
}]}
},
_ => panic!("Unexpected event"),
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
}
+#[test]
+fn test_channel_close_when_not_timely_accepted() {
+ // Create network of two nodes
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Simulate a peer disconnect mid-handshake:
+ // the channel is initiated from the node 0 side,
+ // but the nodes disconnect before node 1 can send accept_channel.
+ let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+ let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+ // Since channel was inbound from node[1] perspective, it should have been dropped immediately.
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+ // In the meantime, some time passes.
+ for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+ nodes[0].node.timer_tick_occurred();
+ }
+
+ // Since we disconnected from the peer and did not reconnect in time,
+ // we should have force-closed the channel by now.
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+
+ {
+ // Since the accept_channel message was never received,
+ // the channel should have been force-closed by now from node 0's side
+ // and the peer removed from per_peer_state.
+ let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+ assert_eq!(node_0_per_peer_state.len(), 0);
+ }
+}
+
+#[test]
+fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
+ // Create network of two nodes
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Simulate a peer disconnect mid-handshake:
+ // the channel is initiated from the node 0 side,
+ // but the nodes disconnect before node 1 can send accept_channel.
+ let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+ let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+ // Since channel was inbound from node[1] perspective, it should have been immediately dropped.
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+ // The peers now reconnect
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
+
+ // Make sure the SendOpenChannel message is added to node_0 pending message events
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ match &msg_events[0] {
+ MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
+ _ => panic!("Unexpected message."),
+ }
+}
+
fn do_test_multi_post_event_actions(do_reload: bool) {
// Tests handling multiple post-Event actions at once.
// There is specific code in ChannelManager to handle channels where multiple post-Event
}
#[test]
-fn test_disconnect_in_funding_batch() {
+fn test_close_in_funding_batch() {
+ // This test ensures that if one of the channels
+ // in the batch closes, the entire batch closes with it.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
// The transaction should not have been broadcast before all channels are ready.
assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
- // The remaining peer in the batch disconnects.
- nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
-
- // The channels in the batch will close immediately.
+ // Force-close the channel for which we've completed the initial monitor.
let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
+ let error_message = "Channel force-closed";
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
+
+ // The monitor should become closed.
+ check_added_monitors(&nodes[0], 1);
+ {
+ let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+ let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+ assert_eq!(monitor_updates_1.len(), 1);
+ assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+ }
+
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ match msg_events[0] {
+ MessageSendEvent::HandleError { .. } => (),
+ _ => panic!("Unexpected message."),
+ }
+
+ // We broadcast the commitment transaction as part of the force-close.
+ {
+ let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+ assert_eq!(broadcasted_txs.len(), 1);
+ assert!(broadcasted_txs[0].txid() != tx.txid());
+ assert_eq!(broadcasted_txs[0].input.len(), 1);
+ assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+ }
+
+ // All channels in the batch should close immediately.
check_closed_events(&nodes[0], &[
ExpectedCloseEvent {
channel_id: Some(channel_id_1),
},
]);
- // The monitor should become closed.
- check_added_monitors(&nodes[0], 1);
- {
- let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
- let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
- assert_eq!(monitor_updates_1.len(), 1);
- assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
- }
-
- // The funding transaction should not have been broadcast, and therefore, we don't need
- // to broadcast a force-close transaction for the closed monitor.
- assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
-
// Ensure the channels don't exist anymore.
assert!(nodes[0].node.list_channels().is_empty());
}
let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
- nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors(&nodes[0], 2);
{
let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
} else {
(&nodes[0], &nodes[1])
};
-
- closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id(), error_message.to_string()).unwrap();
let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 1);
match msg_events.pop().unwrap() {
do_test_funding_and_commitment_tx_confirm_same_block(false);
do_test_funding_and_commitment_tx_confirm_same_block(true);
}
+
+#[test]
+fn test_accept_inbound_channel_errors_queued() {
+ // For manually accepted inbound channels, tests that an error when accepting the channel
+ // is handled correctly and the channel fails back to the initiator.
+ let mut config0 = test_default_channel_config();
+ let mut config1 = config0.clone();
+ config1.channel_handshake_limits.their_to_self_delay = 1000;
+ config1.manually_accept_inbound_channels = true;
+ config0.channel_handshake_config.our_to_self_delay = 2000;
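+ // Node 0 asks for a to_self_delay of 2000 blocks, exceeding node 1's limit of 1000, so
+ // accepting the channel must fail.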
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
+ let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
+ Err(APIError::ChannelUnavailable { err: _ }) => (),
+ _ => panic!(),
+ }
+ }
+ _ => panic!("Unexpected event"),
+ }
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+ open_channel_msg.common_fields.temporary_channel_id);
+}