X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=50e65db3f67dabc6227491b634e93cb404c51787;hb=99985c620962fd35163ee8b585c3a7edb6491db7;hp=fc0cd6d539050a209e139b4386e74efa0c861497;hpb=8fd8966b9a7e798184c89acf7a4a9789c309f735;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index fc0cd6d5..50e65db3 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -19,7 +19,7 @@ use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::blockdata::constants::{genesis_block, ChainHash};
 use bitcoin::network::constants::Network;
 
 use bitcoin::hashes::Hash;
@@ -1373,8 +1373,14 @@ pub struct ChannelDetails {
 	/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
 	/// to use a limit as close as possible to the HTLC limit we can currently send.
 	///
-	/// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+	/// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+	/// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
 	pub next_outbound_htlc_limit_msat: u64,
+	/// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+	/// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
+	/// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
+	/// route which is valid.
+	pub next_outbound_htlc_minimum_msat: u64,
 	/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
 	/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
 	/// available for inclusion in new inbound HTLCs).
@@ -1494,6 +1500,7 @@ impl ChannelDetails {
 			inbound_capacity_msat: balance.inbound_capacity_msat,
 			outbound_capacity_msat: balance.outbound_capacity_msat,
 			next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+			next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
 			user_channel_id: channel.get_user_id(),
 			confirmations_required: channel.minimum_depth(),
 			confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
@@ -2964,7 +2971,7 @@ where
 			self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
 	}
 
-	/// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
+	/// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
 	/// `route_params` and retry failed payment paths based on `retry_strategy`.
 	pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
 		let best_block_height = self.best_block.read().unwrap().height();
@@ -7017,6 +7024,10 @@ where
 		provided_init_features(&self.default_configuration)
 	}
 
+	fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
+		Some(vec![ChainHash::from(&self.genesis_hash[..])])
+	}
+
 	fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
 		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
 			"Dual-funded channels not supported".to_owned(),
@@ -7166,10 +7177,9 @@ impl Writeable for ChannelDetails {
 			(14, user_channel_id_low, required),
 			(16, self.balance_msat, required),
 			(18, self.outbound_capacity_msat, required),
-			// Note that by the time we get past the required read above, outbound_capacity_msat will be
-			// filled in, so we can safely unwrap it here.
-			(19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+			(19, self.next_outbound_htlc_limit_msat, required),
 			(20, self.inbound_capacity_msat, required),
+			(21, self.next_outbound_htlc_minimum_msat, required),
 			(22, self.confirmations_required, option),
 			(24, self.force_close_spend_delay, option),
 			(26, self.is_outbound, required),
@@ -7206,6 +7216,7 @@ impl Readable for ChannelDetails {
 				// filled in, so we can safely unwrap it here.
 				(19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
 				(20, inbound_capacity_msat, required),
+				(21, next_outbound_htlc_minimum_msat, (default_value, 0)),
 				(22, confirmations_required, option),
 				(24, force_close_spend_delay, option),
 				(26, is_outbound, required),
@@ -7239,6 +7250,7 @@ impl Readable for ChannelDetails {
 				balance_msat: balance_msat.0.unwrap(),
 				outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
 				next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+				next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
 				inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
 				confirmations_required,
 				confirmations,
@@ -9327,12 +9339,14 @@ mod tests {
 				&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
 			peer_pks.push(random_pk);
 			nodes[1].node.peer_connected(&random_pk, &msgs::Init {
-				features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+				features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+			}, true).unwrap();
 		}
 		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
 			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
 		nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap_err();
 
 		// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
 		// them if we have too many un-channel'd peers.
@@ -9343,13 +9357,16 @@ mod tests {
 			if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
 		}
 		nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
 		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap_err();
 
 		// but of course if the connection is outbound its allowed...
 		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 		// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
@@ -9373,7 +9390,8 @@ mod tests {
 		// "protected" and can connect again.
 		mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
 		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
 		get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
 		// Further, because the first channel was funded, we can open another channel with
@@ -9438,7 +9456,8 @@ mod tests {
 		let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
 			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
 		nodes[1].node.peer_connected(&random_pk, &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
 		nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
 
 		let events = nodes[1].node.get_and_clear_pending_events();
@@ -9456,7 +9475,8 @@ mod tests {
 		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
 			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
 		nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
 		nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
 		let events = nodes[1].node.get_and_clear_pending_events();
 		match events[0] {
@@ -9656,8 +9676,12 @@ pub mod bench {
 		});
 		let node_b_holder = ANodeHolder { node: &node_b };
 
-		node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
-		node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
+		node_a.peer_connected(&node_b.get_our_node_id(), &Init {
+			features: node_b.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
+		node_b.peer_connected(&node_a.get_our_node_id(), &Init {
+			features: node_a.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();
 		node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
 		node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
 		node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
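
A note on the new `ChannelDetails::next_outbound_htlc_minimum_msat` field (illustration, not part of the patch): routing code can now check a per-HTLC lower bound alongside the existing `next_outbound_htlc_limit_msat` upper bound when deciding whether a channel can carry a given payment. A minimal sketch, assuming the caller already has a `ChannelDetails` (for example from `ChannelManager::list_usable_channels`); the helper name is hypothetical:

use lightning::ln::channelmanager::ChannelDetails;

/// Hypothetical helper (not part of this diff): true if a single outbound HTLC of
/// `amt_msat` fits within the channel's current per-HTLC lower and upper bounds.
fn htlc_amount_fits(chan: &ChannelDetails, amt_msat: u64) -> bool {
	amt_msat >= chan.next_outbound_htlc_minimum_msat
		&& amt_msat <= chan.next_outbound_htlc_limit_msat
}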
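
Similarly, the `networks` field added to `msgs::Init` (passed as `None` at every call site in this diff) can carry an explicit list of chain hashes, built the same way `get_genesis_hashes` builds one from a genesis block hash. An illustrative sketch, assuming empty init features; the function name is hypothetical:

use bitcoin::blockdata::constants::{genesis_block, ChainHash};
use bitcoin::network::constants::Network;
use lightning::ln::features::InitFeatures;
use lightning::ln::msgs::Init;

/// Hypothetical example (not part of this diff): an Init message advertising Bitcoin
/// mainnet only, mirroring the ChainHash construction used in get_genesis_hashes.
fn mainnet_only_init() -> Init {
	let mainnet = ChainHash::from(&genesis_block(Network::Bitcoin).header.block_hash()[..]);
	Init {
		features: InitFeatures::empty(),
		networks: Some(vec![mainnet]),
		remote_network_address: None,
	}
}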