use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::blockdata::constants::{genesis_block, ChainHash};
use bitcoin::network::constants::Network;
use bitcoin::hashes::Hash;
/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
/// to use a limit as close as possible to the HTLC limit we can currently send.
///
- /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+ /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+ /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
pub next_outbound_htlc_limit_msat: u64,
+ /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+ /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower bound, rather than
+ /// an upper bound. This is intended for use when routing, allowing us to ensure we pick a
+ /// valid route.
+ pub next_outbound_htlc_minimum_msat: u64,
/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new inbound HTLCs).
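// Illustrative sketch, not part of this patch: with both bounds exposed on `ChannelDetails`,
// a caller deciding whether a channel can carry a given HTLC can check the new lower bound
// alongside the existing upper bound. The helper name below is hypothetical.
fn channel_can_carry_htlc(details: &ChannelDetails, amount_msat: u64) -> bool {
    amount_msat >= details.next_outbound_htlc_minimum_msat
        && amount_msat <= details.next_outbound_htlc_limit_msat
}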
inbound_capacity_msat: balance.inbound_capacity_msat,
outbound_capacity_msat: balance.outbound_capacity_msat,
next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+ next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
user_channel_id: channel.get_user_id(),
confirmations_required: channel.minimum_depth(),
confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
- /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
+ /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
/// `route_params` and retry failed payment paths based on `retry_strategy`.
pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
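// Usage sketch (assumptions: `route_params` and `payment_secret` were obtained elsewhere, e.g.
// from an invoice, and deriving the `PaymentId` from the payment hash is only one possible choice):
//
//     channel_manager.send_payment(payment_hash,
//         RecipientOnionFields::secret_only(payment_secret),
//         PaymentId(payment_hash.0), route_params, Retry::Attempts(3))?;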
provided_init_features(&self.default_configuration)
}
+ fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
+ Some(vec![ChainHash::from(&self.genesis_hash[..])])
+ }
+
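// The `ChainHash` returned here is derived from the genesis hash this `ChannelManager` was
// constructed with, and is presumably what gets advertised in (and checked against) the new
// `networks` field of the `init` message. For example, on testnet it corresponds to:
//
//     ChainHash::from(&genesis_block(Network::Testnet).block_hash()[..])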
fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
(14, user_channel_id_low, required),
(16, self.balance_msat, required),
(18, self.outbound_capacity_msat, required),
- // Note that by the time we get past the required read above, outbound_capacity_msat will be
- // filled in, so we can safely unwrap it here.
- (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+ (19, self.next_outbound_htlc_limit_msat, required),
(20, self.inbound_capacity_msat, required),
+ (21, self.next_outbound_htlc_minimum_msat, required),
(22, self.confirmations_required, option),
(24, self.force_close_spend_delay, option),
(26, self.is_outbound, required),
// filled in, so we can safely unwrap it here.
(19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
(20, inbound_capacity_msat, required),
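// Type 21 is odd, so readers that predate this field skip it instead of failing to
// deserialize; reading it with a default of 0 (i.e. no minimum) presumably keeps
// `ChannelDetails` objects serialized before this field existed readable.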
+ (21, next_outbound_htlc_minimum_msat, (default_value, 0)),
(22, confirmations_required, option),
(24, force_close_spend_delay, option),
(26, is_outbound, required),
balance_msat: balance_msat.0.unwrap(),
outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+ next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
confirmations_required,
confirmations,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
peer_pks.push(random_pk);
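// Note: every `msgs::Init` built in these tests now sets the new `networks` field to `None`,
// i.e. the peer advertises no explicit chain list and is presumably accepted on that basis;
// the connection-limiting behavior under test is unchanged.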
nodes[1].node.peer_connected(&random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
}
let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap_err();
// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
// them if we have too many un-channel'd peers.
if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
}
nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap_err();
// but of course if the connection is outbound it's allowed...
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
// "protected" and can connect again.
mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
// Further, because the first channel was funded, we can open another channel with
let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(&random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
});
let node_b_holder = ANodeHolder { node: &node_b };
- node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
- node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init {
+ features: node_b.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init {
+ features: node_a.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));