//! send-side handling is correct, as well as our handling of messages from other peers. We
//! consider it a failure if any action results in a channel being force-closed.
-use bitcoin::TxMerkleNode;
-use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
use lightning::ln::script::ShutdownScript;
+use lightning::ln::functional_test_utils::*;
use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use lightning::util::errors::APIError;
use lightning::util::logger::Logger;
pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
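+ // The broadcaster interface is now batch-oriented, but this fuzz target still
+ // deliberately drops all broadcasts: nothing here needs transactions to confirm.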
- fn broadcast_transaction(&self, _tx: &Transaction) { }
+ fn broadcast_transactions(&self, _txs: &[&Transaction]) { }
}
pub struct VecWriter(pub Vec<u8>);
}
#[inline]
-fn check_api_err(api_err: APIError) {
+fn check_api_err(api_err: APIError, sendable_bounds_violated: bool) {
match api_err {
APIError::APIMisuseError { .. } => panic!("We can't misuse the API"),
APIError::FeeRateTooHigh { .. } => panic!("We can't send too much fee?"),
// is probably just stale and you should add new messages here.
match err.as_str() {
"Peer for first hop currently disconnected" => {},
- _ if err.starts_with("Cannot push more than their max accepted HTLCs ") => {},
- _ if err.starts_with("Cannot send value that would put us over the max HTLC value in flight our peer will accept ") => {},
- _ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {},
- _ if err.starts_with("Cannot send value that would put counterparty balance under holder-announced channel reserve value") => {},
- _ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {},
- _ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {},
- _ if err.starts_with("Cannot send value that would put our exposure to dust HTLCs at") => {},
+ _ if err.starts_with("Cannot send less than our next-HTLC minimum - ") => {},
+ _ if err.starts_with("Cannot send more than our next-HTLC maximum - ") => {},
_ => panic!("{}", err),
}
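+ // Whichever error we hit above, the caller should only have seen it when the
+ // attempted send violated the sendable bounds it observed (an unusable channel
+ // reports bounds of (0, 0), so a disconnected peer also counts).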
+ assert!(sendable_bounds_violated);
},
APIError::MonitorUpdateInProgress => {
// We can (obviously) temp-fail a monitor update
}
}
#[inline]
-fn check_payment_err(send_err: PaymentSendFailure) {
+fn check_payment_err(send_err: PaymentSendFailure, sendable_bounds_violated: bool) {
match send_err {
- PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err),
+ PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err, sendable_bounds_violated),
PaymentSendFailure::PathParameterError(per_path_results) => {
- for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err); } }
+ for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } }
},
PaymentSendFailure::AllFailedResendSafe(per_path_results) => {
- for api_err in per_path_results { check_api_err(api_err); }
+ for api_err in per_path_results { check_api_err(api_err, sendable_bounds_violated); }
},
PaymentSendFailure::PartialFailure { results, .. } => {
- for res in results { if let Err(api_err) = res { check_api_err(api_err); } }
+ for res in results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } }
},
PaymentSendFailure::DuplicatePayment => panic!(),
}
let mut payment_id = [0; 32];
payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
*payment_idx += 1;
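+ // Fetch the channel's currently-advertised sendable range so we can later check that
+ // any send failure was consistent with it. If the channel isn't currently usable (e.g.
+ // the peer is disconnected) we treat both bounds as 0, making any non-zero send
+ // out-of-bounds.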
+ let (min_value_sendable, max_value_sendable) = source.list_usable_channels()
+ .iter().find(|chan| chan.short_channel_id == Some(dest_chan_id))
+ .map(|chan|
+ (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
+ .unwrap_or((0, 0));
if let Err(err) = source.send_payment_with_route(&Route {
paths: vec![Path { hops: vec![RouteHop {
pubkey: dest.get_our_node_id(),
}], blinded_tail: None }],
payment_params: None,
}, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
- check_payment_err(err);
+ check_payment_err(err, amt > max_value_sendable || amt < min_value_sendable);
false
- } else { true }
+ } else {
+ // Note that while the max is a strict upper bound, the minimum is not: we can
+ // occasionally send substantially below it, with only a gap immediately below the
+ // minimum being unusable. Thus, we don't check against min_value_sendable here.
+ assert!(amt <= max_value_sendable);
+ true
+ }
}
#[inline]
fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) -> bool {
let mut payment_id = [0; 32];
payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
*payment_idx += 1;
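+ // As in send_payment, grab the first hop's advertised sendable range (0/0 if the
+ // channel is currently unusable).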
+ let (min_value_sendable, max_value_sendable) = source.list_usable_channels()
+ .iter().find(|chan| chan.short_channel_id == Some(middle_chan_id))
+ .map(|chan|
+ (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
+ .unwrap_or((0, 0));
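+ // The middle hop charges a fixed fee, so the HTLC on the first hop carries
+ // amt + first_hop_fee; that total is what must fit in the sendable range above.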
+ let first_hop_fee = 50_000;
if let Err(err) = source.send_payment_with_route(&Route {
paths: vec![Path { hops: vec![RouteHop {
pubkey: middle.get_our_node_id(),
node_features: middle.node_features(),
short_channel_id: middle_chan_id,
channel_features: middle.channel_features(),
- fee_msat: 50000,
+ fee_msat: first_hop_fee,
cltv_expiry_delta: 100,
},RouteHop {
pubkey: dest.get_our_node_id(),
}], blinded_tail: None }],
payment_params: None,
}, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
- check_payment_err(err);
+ let sent_amt = amt + first_hop_fee;
+ check_payment_err(err, sent_amt < min_value_sendable || sent_amt > max_value_sendable);
false
- } else { true }
+ } else {
+ // Note that while the max is a strict upper bound, the minimum is not: we can
+ // occasionally send substantially below it, with only a gap immediately below the
+ // minimum being unusable. Thus, we don't check against min_value_sendable here.
+ assert!(amt + first_hop_fee <= max_value_sendable);
+ true
+ }
}
#[inline]
let mut channel_txn = Vec::new();
macro_rules! make_channel {
($source: expr, $dest: expr, $chan_id: expr) => { {
- $source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }, true).unwrap();
- $dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }, false).unwrap();
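+ // msgs::Init now carries an optional list of chain hashes the peer is interested in;
+ // passing None advertises no chain preference.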
+ $source.peer_connected(&$dest.get_our_node_id(), &Init {
+ features: $dest.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ $dest.peer_connected(&$source.get_our_node_id(), &Init {
+ features: $source.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
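+ // 100k sats of channel value, 42 msat pushed to the counterparty, a user_channel_id of
+ // 0, and the default config.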
$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
let open_channel = {
macro_rules! confirm_txn {
($node: expr) => { {
let chain_hash = genesis_block(Network::Bitcoin).block_hash();
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
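+ // create_dummy_header (from functional_test_utils, imported above) fills in fixed
+ // dummy header fields, taking only the prev-blockhash and timestamp.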
+ let mut header = create_dummy_header(chain_hash, 42);
let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
$node.transactions_confirmed(&header, &txdata, 1);
for _ in 2..100 {
- header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
+ header = create_dummy_header(header.block_hash(), 42);
}
$node.best_block_updated(&header, 99);
} }
},
0x0e => {
if chan_a_disconnected {
- nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
- nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
+ nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init {
+ features: nodes[1].init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init {
+ features: nodes[0].init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
chan_a_disconnected = false;
}
},
0x0f => {
if chan_b_disconnected {
- nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
- nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init {
+ features: nodes[2].init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init {
+ features: nodes[1].init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
chan_b_disconnected = false;
}
},
// Next, make sure peers are all connected to each other
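+ // (This mirrors the 0x0e/0x0f reconnect handlers above.)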
if chan_a_disconnected {
- nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
- nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
+ nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init {
+ features: nodes[1].init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init {
+ features: nodes[0].init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
chan_a_disconnected = false;
}
if chan_b_disconnected {
- nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
- nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init {
+ features: nodes[2].init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init {
+ features: nodes[1].init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
chan_b_disconnected = false;
}