use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
-use ln::router::Route;
-use ln::features::InitFeatures;
+use ln::features::{InitFeatures, NodeFeatures};
+use ln::router::{Route, RouteHop};
use ln::msgs;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
-use chain::keysinterface::{ChannelKeys, KeysInterface};
+use chain::keysinterface::{ChannelKeys, KeysInterface, InMemoryChannelKeys};
use util::config::UserConfig;
use util::{byte_utils, events};
use util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use util::chacha20::ChaCha20;
+use util::chacha20::{ChaCha20, ChaChaReader};
use util::logger::Logger;
use util::errors::APIError;
use std::{cmp, mem};
use std::collections::{HashMap, hash_map, HashSet};
-use std::io::Cursor;
+use std::io::{Cursor, Read};
use std::sync::{Arc, Mutex, MutexGuard, RwLock};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
-
-const SIXTY_FIVE_ZEROS: [u8; 65] = [0; 65];
+use std::marker::{Sync, Send};
+use std::ops::Deref;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
// forward the HTLC with information it will give back to us when it does so, or if it should Fail
// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
//
-// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
-// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
-// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
-// the HTLC backwards along the relevant path).
+// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
+// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
+// with it to track where it came from (in case of onwards-forward error), waiting a random delay
+// before we forward it.
+//
+// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
+// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
+// to either fail-backwards or fulfill the HTLC backwards along the relevant path).
// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
// our payment, which we can use to decode errors or inform the user that the payment was sent.
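+//
+// Rough shape of the pipeline described above (a sketch of the types defined below):
+//
+//   update_add_htlc --(onion decode)--> PendingHTLCStatus::{Forward(PendingHTLCInfo), Fail(..)}
+//     --(HTLC committed)--> HTLCForwardInfo::AddHTLC { forward_info: PendingHTLCInfo, .. }
+//       --(random delay)--> outbound HTLC with HTLCSource::PreviousHopData filled in
+//                           (or HTLCSource::OutboundRoute for payments we originate)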
-/// Stores the info we will need to send when we want to forward an HTLC onwards
+
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-pub(super) struct PendingForwardHTLCInfo {
- onion_packet: Option<msgs::OnionPacket>,
+enum PendingForwardReceiveHTLCInfo {
+ Forward {
+ onion_packet: msgs::OnionPacket,
+ short_channel_id: u64, // This should be NonZero<u64> eventually
+ },
+ Receive {
+ payment_data: Option<msgs::FinalOnionHopData>,
+ incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
+ },
+}
+
+#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+pub(super) struct PendingHTLCInfo {
+ type_data: PendingForwardReceiveHTLCInfo,
incoming_shared_secret: [u8; 32],
payment_hash: PaymentHash,
- short_channel_id: u64,
pub(super) amt_to_forward: u64,
pub(super) outgoing_cltv_value: u32,
}
/// Stores the result of checking whether we can forward an HTLC: the relevant forwarding info if so, or the failure message to reject it with if not
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum PendingHTLCStatus {
- Forward(PendingForwardHTLCInfo),
+ Forward(PendingHTLCInfo),
Fail(HTLCFailureMsg),
}
+pub(super) enum HTLCForwardInfo {
+ AddHTLC {
+ prev_short_channel_id: u64,
+ prev_htlc_id: u64,
+ forward_info: PendingHTLCInfo,
+ },
+ FailHTLC {
+ htlc_id: u64,
+ err_packet: msgs::OnionErrorPacket,
+ },
+}
+
/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, PartialEq)]
pub(super) struct HTLCPreviousHopData {
incoming_packet_shared_secret: [u8; 32],
}
+struct ClaimableHTLC {
+ src: HTLCPreviousHopData,
+ value: u64,
+ payment_data: Option<msgs::FinalOnionHopData>,
+ cltv_expiry: u32,
+}
+
/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, PartialEq)]
pub(super) enum HTLCSource {
PreviousHopData(HTLCPreviousHopData),
OutboundRoute {
- route: Route,
+ path: Vec<RouteHop>,
session_priv: SecretKey,
/// Technically we can recalculate this from the route, but we cache it here to avoid
/// doing a double-pass on route when we get a failure back
impl HTLCSource {
pub fn dummy() -> Self {
HTLCSource::OutboundRoute {
- route: Route { hops: Vec::new() },
+ path: Vec::new(),
session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
first_hop_htlc_msat: 0,
}
/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
-pub(super) enum HTLCForwardInfo {
- AddHTLC {
- prev_short_channel_id: u64,
- prev_htlc_id: u64,
- forward_info: PendingForwardHTLCInfo,
- },
- FailHTLC {
- htlc_id: u64,
- err_packet: msgs::OnionErrorPacket,
- },
-}
-
/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
/// be sent in the order they appear in the return value, however sometimes the order needs to be
/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
/// short channel id -> forward infos. Key of 0 means payments received
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the existence of a channel with the short id here, nor the short
- /// ids in the PendingForwardHTLCInfo!
+ /// ids in the PendingHTLCInfo!
pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
- /// payment_hash -> Vec<(amount_received, htlc_source)> for tracking things that were to us and
- /// can be failed/claimed by the user
+ /// (payment_hash, payment_secret) -> Vec<HTLCs> for tracking things that
+ /// were to us and can be failed/claimed by the user
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the channels given here actually existing anymore by the time you
/// go to read them!
- pub(super) claimable_htlcs: HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
+ /// TODO: We need to time out HTLCs sitting here which are waiting on other AMP HTLCs to
+ /// arrive.
+ claimable_htlcs: HashMap<(PaymentHash, Option<[u8; 32]>), Vec<ClaimableHTLC>>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
}
+/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
+/// the latest Init features we heard from the peer.
+struct PeerState {
+ latest_features: InitFeatures,
+}
+
#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
+/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
+/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
+/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
+/// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
+/// issues such as overly long function definitions.
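+///
+/// A rough construction sketch, assuming `monitor` is your ManyChannelMonitor implementation
+/// and the other arguments are your existing interface objects (all names here illustrative):
+///
+/// ```ignore
+/// let manager: SimpleArcChannelManager<MyMonitor> = Arc::new(ChannelManager::new(
+///     Network::Bitcoin, fee_estimator, Arc::new(monitor), tx_broadcaster, logger,
+///     keys_manager, user_config, current_block_height)?);
+/// ```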
+pub type SimpleArcChannelManager<M> = Arc<ChannelManager<InMemoryChannelKeys, Arc<M>>>;
+
+/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
+/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
+/// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
+/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
+/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
+/// helps with issues such as long function definitions.
+pub type SimpleRefChannelManager<'a, M> = ChannelManager<InMemoryChannelKeys, &'a M>;
+
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
-/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfec.
-pub struct ChannelManager<ChanSigner: ChannelKeys> {
+/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
+///
+/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
+/// or a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
+/// essentially you should default to using a SimpleRefChannelManager, and use a
+/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
+/// you're using lightning-net-tokio.
+pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref> where M::Target: ManyChannelMonitor {
default_configuration: UserConfig,
genesis_hash: Sha256dHash,
fee_estimator: Arc<FeeEstimator>,
- monitor: Arc<ManyChannelMonitor>,
+ monitor: M,
tx_broadcaster: Arc<BroadcasterInterface>,
#[cfg(test)]
channel_state: Mutex<ChannelHolder<ChanSigner>>,
our_network_key: SecretKey,
+ last_node_announcement_serial: AtomicUsize,
+
+ /// The bulk of our storage will eventually be here (channels and message queues and the like).
+ /// If we are connected to a peer we always at least have an entry here, even if no channels
+ /// are currently open with that peer.
+ /// Because adding or removing an entry is rare, we usually take an outer read lock and then
+ /// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
+ /// new channel.
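+ /// A read-path sketch of this locking order (illustrative):
+ ///   let peers = self.per_peer_state.read().unwrap();
+ ///   if let Some(peer) = peers.get(&their_node_id) {
+ ///       let peer_state = peer.lock().unwrap(); // inner Mutex taken under the outer read lock
+ ///   }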
+ per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+
pending_events: Mutex<Vec<events::Event>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// Essentially just when we're serializing ourselves out.
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
-macro_rules! secp_call {
- ( $res: expr, $err: expr ) => {
- match $res {
- Ok(key) => key,
- Err(_) => return Err($err),
- }
- };
-}
-
/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
pub short_channel_id: Option<u64>,
/// The node_id of our counterparty
pub remote_network_id: PublicKey,
+ /// The Features the channel counterparty provided upon last connection.
+ /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
+ /// many routing-relevant features are present in the init context.
+ pub counterparty_features: InitFeatures,
/// The value, in satoshis, of this channel as appears in the funding output
pub channel_value_satoshis: u64,
/// The user_id passed in to create_channel, or 0 if the channel was inbound.
pub is_live: bool,
}
+/// If a payment fails to send, it can be in one of several states. This enum is returned as the
+/// Err() type describing which state the payment is in; see the description of individual enum
+/// states for more.
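+///
+/// A rough sketch of handling each state (`channel_manager`, `route`, and the secret are
+/// illustrative; see send_payment for the exact call):
+///
+/// ```ignore
+/// match channel_manager.send_payment(route, payment_hash, Some(&payment_secret)) {
+///     Ok(()) => {},
+///     // Fix the offending parameter, then retry the payment in full:
+///     Err(PaymentSendFailure::ParameterError(_e)) => {},
+///     // Safe to retry the payment in full, ideally over different paths:
+///     Err(PaymentSendFailure::AllFailedRetrySafe(_errs)) => {},
+///     // Retry only the Err entries which are not APIError::MonitorUpdateFailed:
+///     Err(PaymentSendFailure::PartialFailure(_results)) => {},
+/// }
+/// ```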
+#[derive(Debug)]
+pub enum PaymentSendFailure {
+ /// A parameter which was passed to send_payment was invalid, preventing us from attempting to
+ /// send the payment at all. No channel state has been changed or messages sent to peers, and
+ /// once you've changed the parameter at fault, you can freely retry the payment in full.
+ ParameterError(APIError),
+ /// All paths which were attempted failed to send, with no channel state change taking place.
+ /// You can freely retry the payment in full (though you probably want to do so over different
+ /// paths than the ones selected).
+ AllFailedRetrySafe(Vec<APIError>),
+ /// Some paths which were attempted failed to send, though possibly not all. At least some
+ /// paths have irrevocably committed to the HTLC and retrying the payment in full would result
+ /// in over-/re-payment.
+ ///
+ /// The results here are ordered the same as the paths in the route object which was passed to
+ /// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely
+ /// retried.
+ ///
+ /// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
+ /// as they will result in over-/re-payment.
+ PartialFailure(Vec<Result<(), APIError>>),
+}
+
macro_rules! handle_error {
- ($self: ident, $internal: expr, $their_node_id: expr, $locked_channel_state: expr) => {
+ ($self: ident, $internal: expr, $their_node_id: expr) => {
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, shutdown_finish }) => {
+ let mut channel_state = None;
if let Some((shutdown_res, update_option)) = shutdown_finish {
$self.finish_force_close_channel(shutdown_res);
if let Some(update) = update_option {
- $locked_channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ channel_state = Some($self.channel_state.lock().unwrap());
+ channel_state.as_mut().unwrap().pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
}
+ #[cfg(debug_assertions)]
+ {
+ // In testing, we always lock here to ensure there are no deadlocks where we
+ // were holding the lock coming into the macro but didn't catch it because we
+ // didn't generate an action and didn't have any HTLCs to fail backwards in the
+ // finish_force_close_channel.
+ if channel_state.is_none() {
+ channel_state = Some($self.channel_state.lock().unwrap());
+ }
+ }
log_error!($self, "{}", err.err);
if let msgs::ErrorAction::IgnoreError = err.action {
- } else { $locked_channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: $their_node_id, action: err.action.clone() }); }
+ } else {
+ if channel_state.is_none() {
+ channel_state = Some($self.channel_state.lock().unwrap());
+ }
+ channel_state.as_mut().unwrap().pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: $their_node_id, action: err.action.clone() });
+ }
// Return error in case higher-level API needs one
Err(err)
},
} else if $resend_commitment { "commitment" }
else if $resend_raa { "RAA" }
else { "nothing" },
- (&$failed_forwards as &Vec<(PendingForwardHTLCInfo, u64)>).len(),
+ (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
(&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
if !$resend_commitment {
debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
}
}
-impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor {
/// Constructs a new ChannelManager to hold several channels and route between them.
///
/// This is the main "logic hub" for all channel-related actions, and implements
/// the ChannelManager as a listener to the BlockNotifier and call the BlockNotifier's
/// `block_(dis)connected` methods, which will notify all registered listeners in one
/// go.
- pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>,keys_manager: Arc<KeysInterface<ChanKeySigner = ChanSigner>>, config: UserConfig, current_blockchain_height: usize) -> Result<Arc<ChannelManager<ChanSigner>>, secp256k1::Error> {
+ pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: M, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, keys_manager: Arc<KeysInterface<ChanKeySigner = ChanSigner>>, config: UserConfig, current_blockchain_height: usize) -> Result<ChannelManager<ChanSigner, M>, secp256k1::Error> {
let secp_ctx = Secp256k1::new();
- let res = Arc::new(ChannelManager {
+ let res = ChannelManager {
default_configuration: config.clone(),
genesis_hash: genesis_block(network).header.bitcoin_hash(),
fee_estimator: feeest.clone(),
- monitor: monitor.clone(),
+ monitor,
tx_broadcaster,
latest_block_height: AtomicUsize::new(current_blockchain_height),
}),
our_network_key: keys_manager.get_node_secret(),
+ last_node_announcement_serial: AtomicUsize::new(0),
+
+ per_peer_state: RwLock::new(HashMap::new()),
+
pending_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
keys_manager,
logger,
- });
+ };
Ok(res)
}
Ok(())
}
- /// Gets the list of open channels, in random order. See ChannelDetail field documentation for
- /// more information.
- pub fn list_channels(&self) -> Vec<ChannelDetails> {
- let channel_state = self.channel_state.lock().unwrap();
- let mut res = Vec::with_capacity(channel_state.by_id.len());
- for (channel_id, channel) in channel_state.by_id.iter() {
- let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
- res.push(ChannelDetails {
- channel_id: (*channel_id).clone(),
- short_channel_id: channel.get_short_channel_id(),
- remote_network_id: channel.get_their_node_id(),
- channel_value_satoshis: channel.get_value_satoshis(),
- inbound_capacity_msat,
- outbound_capacity_msat,
- user_id: channel.get_user_id(),
- is_live: channel.is_live(),
- });
- }
- res
- }
-
- /// Gets the list of usable channels, in random order. Useful as an argument to
- /// Router::get_route to ensure non-announced channels are used.
- ///
- /// These are guaranteed to have their is_live value set to true, see the documentation for
- /// ChannelDetails::is_live for more info on exactly what the criteria are.
- pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
- let channel_state = self.channel_state.lock().unwrap();
- let mut res = Vec::with_capacity(channel_state.by_id.len());
- for (channel_id, channel) in channel_state.by_id.iter() {
- // Note we use is_live here instead of usable which leads to somewhat confused
- // internal/external nomenclature, but that's ok cause that's probably what the user
- // really wanted anyway.
- if channel.is_live() {
+ fn list_channels_with_filter<F: FnMut(&(&[u8; 32], &Channel<ChanSigner>)) -> bool>(&self, f: F) -> Vec<ChannelDetails> {
+ let mut res = Vec::new();
+ {
+ let channel_state = self.channel_state.lock().unwrap();
+ res.reserve(channel_state.by_id.len());
+ for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
res.push(ChannelDetails {
channel_id: (*channel_id).clone(),
short_channel_id: channel.get_short_channel_id(),
remote_network_id: channel.get_their_node_id(),
+ counterparty_features: InitFeatures::empty(),
channel_value_satoshis: channel.get_value_satoshis(),
inbound_capacity_msat,
outbound_capacity_msat,
user_id: channel.get_user_id(),
- is_live: true,
+ is_live: channel.is_live(),
});
}
}
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ for chan in res.iter_mut() {
+ if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) {
+ chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone();
+ }
+ }
res
}
+ /// Gets the list of open channels, in random order. See ChannelDetails field documentation for
+ /// more information.
+ pub fn list_channels(&self) -> Vec<ChannelDetails> {
+ self.list_channels_with_filter(|_| true)
+ }
+
+ /// Gets the list of usable channels, in random order. Useful as an argument to
+ /// Router::get_route to ensure non-announced channels are used.
+ ///
+ /// These are guaranteed to have their is_live value set to true, see the documentation for
+ /// ChannelDetails::is_live for more info on exactly what the criteria are.
+ pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
+ // Note we use is_live here instead of usable which leads to somewhat confused
+ // internal/external nomenclature, but that's ok cause that's probably what the user
+ // really wanted anyway.
+ self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
+ }
+
/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
/// will be accepted on the given channel, and after additional timeout/the closing of all
/// pending HTLCs, the channel will be closed on chain.
}
let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
- let next_hop_data = {
- let mut decoded = [0; 65];
- chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
- match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
+ let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
+ let (next_hop_data, next_hop_hmac) = {
+ match msgs::OnionHopData::read(&mut chacha_stream) {
Err(err) => {
let error_code = match err {
msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
+ msgs::DecodeError::UnknownRequiredFeature|
+ msgs::DecodeError::InvalidValue|
+ msgs::DecodeError::ShortRead => 0x4000 | 22, // invalid_onion_payload
_ => 0x2000 | 2, // Should never happen
};
return_err!("Unable to decode our hop data", error_code, &[0;0]);
},
- Ok(msg) => msg
+ Ok(msg) => {
+ let mut hmac = [0; 32];
+ if let Err(_) = chacha_stream.read_exact(&mut hmac[..]) {
+ return_err!("Unable to decode hop data", 0x4000 | 22, &[0;0]);
+ }
+ (msg, hmac)
+ },
}
};
- let pending_forward_info = if next_hop_data.hmac == [0; 32] {
+ let pending_forward_info = if next_hop_hmac == [0; 32] {
#[cfg(test)]
{
// In tests, make sure that the initial onion packet data is, at least, non-0.
// as-is (and were originally 0s).
// Of course reverse path calculation is still pretty easy given naive routing
// algorithms, but this fixes the most-obvious case.
- let mut new_packet_data = [0; 19*65];
- chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
- assert_ne!(new_packet_data[0..65], [0; 65][..]);
- assert_ne!(new_packet_data[..], [0; 19*65][..]);
+ let mut next_bytes = [0; 32];
+ chacha_stream.read_exact(&mut next_bytes).unwrap();
+ assert_ne!(next_bytes[..], [0; 32][..]);
+ chacha_stream.read_exact(&mut next_bytes).unwrap();
+ assert_ne!(next_bytes[..], [0; 32][..]);
}
// OUR PAYMENT!
return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
}
// final_incorrect_htlc_amount
- if next_hop_data.data.amt_to_forward > msg.amount_msat {
+ if next_hop_data.amt_to_forward > msg.amount_msat {
return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
}
// final_incorrect_cltv_expiry
- if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
+ if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
}
+ let payment_data = match next_hop_data.format {
+ msgs::OnionHopDataFormat::Legacy { .. } => None,
+ msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
+ msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
+ };
+
// Note that we could obviously respond immediately with an update_fulfill_htlc
// message, however that would leak that we are the recipient of this payment, so
// instead we stay symmetric with the forwarding case, only responding (after a
// delay) once they've sent us a commitment_signed!
- PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
- onion_packet: None,
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
+ type_data: PendingForwardReceiveHTLCInfo::Receive {
+ payment_data,
+ incoming_cltv_expiry: msg.cltv_expiry,
+ },
payment_hash: msg.payment_hash.clone(),
- short_channel_id: 0,
incoming_shared_secret: shared_secret,
- amt_to_forward: next_hop_data.data.amt_to_forward,
- outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+ amt_to_forward: next_hop_data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
})
} else {
let mut new_packet_data = [0; 20*65];
- chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
- chacha.process(&SIXTY_FIVE_ZEROS[..], &mut new_packet_data[19*65..]);
+ let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
+ #[cfg(debug_assertions)]
+ {
+ // Check two things:
+ // a) that the behavior of our stream here will return Ok(0) even if the TLV
+ // read above emptied out our buffer and the unwrap() won't needlessly panic
+ // b) that we didn't somehow magically end up with extra data.
+ let mut t = [0; 1];
+ debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
+ }
+ // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
+ // fill the onion hop data we'll forward to our next-hop peer.
+ chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
version: 0,
public_key,
hop_data: new_packet_data,
- hmac: next_hop_data.hmac.clone(),
+ hmac: next_hop_hmac.clone(),
};
- PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
- onion_packet: Some(outgoing_packet),
+ let short_channel_id = match next_hop_data.format {
+ msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
+ msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
+ msgs::OnionHopDataFormat::FinalNode { .. } => {
+ return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
+ },
+ };
+
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
+ type_data: PendingForwardReceiveHTLCInfo::Forward {
+ onion_packet: outgoing_packet,
+ short_channel_id: short_channel_id,
+ },
payment_hash: msg.payment_hash.clone(),
- short_channel_id: next_hop_data.data.short_channel_id,
incoming_shared_secret: shared_secret,
- amt_to_forward: next_hop_data.data.amt_to_forward,
- outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+ amt_to_forward: next_hop_data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
})
};
channel_state = Some(self.channel_state.lock().unwrap());
- if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
- if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
+ if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref type_data, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
+ // If short_channel_id is 0 here, we'll reject the HTLC in the body below (which is
+ // important as various things later assume an HTLC is a ::Receive only if its
+ // short_channel_id is 0).
+ if let &PendingForwardReceiveHTLCInfo::Forward { ref short_channel_id, .. } = type_data {
let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
let forwarding_id = match id_option {
None => { // unknown_next_peer
/// payment_preimage tracking (which you should already be doing as they represent "proof of
/// payment") and prevent double-sends yourself.
///
- /// May generate a SendHTLCs message event on success, which should be relayed.
+ /// May generate SendHTLCs message events on success, which should be relayed.
///
- /// Raises APIError::RoutError when invalid route or forward parameter
- /// (cltv_delta, fee, node public key) is specified.
- /// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates
- /// (including due to previous monitor update failure or new permanent monitor update failure).
- /// Raised APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
- /// relevant updates.
+ /// Each path may have a different return value, and PaymentSendFailure may contain a Vec with
+ /// each entry matching the corresponding-index entry in the route paths.
///
- /// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed
- /// and you may wish to retry via a different route immediately.
- /// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably
- /// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry
- /// the payment via a different route unless you intend to pay twice!
- pub fn send_payment(&self, route: Route, payment_hash: PaymentHash) -> Result<(), APIError> {
- if route.hops.len() < 1 || route.hops.len() > 20 {
- return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
- }
+ /// In general, a path may raise:
+ /// * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee,
+ /// node public key) is specified.
+ /// * APIError::ChannelUnavailable if the next-hop channel is not available for updates
+ /// (including due to previous monitor update failure or new permanent monitor update
+ /// failure).
+ /// * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+ /// relevant updates.
+ ///
+ /// Note that depending on the type of the PaymentSendFailure the HTLC may have been
+ /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
+ /// different route unless you intend to pay twice!
+ ///
+ /// payment_secret is unrelated to payment_hash (or PaymentPreimage) and exists to authenticate
+ /// the sender to the recipient and prevent payment-probing (deanonymization) attacks. For
+ /// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route
+ /// must not contain multiple paths as otherwise the multipath data cannot be sent.
+ /// If a payment_secret *is* provided, we assume that the invoice had the basic_mpp feature bit
+ /// set (either as required or as available).
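+ ///
+ /// A minimal call sketch (`route` comes from your router and `invoice_payment_secret` from
+ /// the invoice, if present; both names are illustrative):
+ ///
+ /// ```ignore
+ /// // Multi-path sends require a payment_secret; legacy single-path sends may pass None.
+ /// channel_manager.send_payment(route, payment_hash, invoice_payment_secret.as_ref())?;
+ /// ```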
+ pub fn send_payment(&self, route: Route, payment_hash: PaymentHash, payment_secret: Option<&[u8; 32]>) -> Result<(), PaymentSendFailure> {
+ if route.paths.len() < 1 {
+ return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
+ }
+ if route.paths.len() > 10 {
+ return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
+ }
+ let mut total_value = 0;
let our_node_id = self.get_our_node_id();
- for (idx, hop) in route.hops.iter().enumerate() {
- if idx != route.hops.len() - 1 && hop.pubkey == our_node_id {
- return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"});
+ for path in route.paths.iter() {
+ if path.len() < 1 || path.len() > 20 {
+ return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
}
+ for (idx, hop) in path.iter().enumerate() {
+ if idx != path.len() - 1 && hop.pubkey == our_node_id {
+ return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"}));
+ }
+ }
+ total_value += path.last().unwrap().fee_msat;
}
-
- let (session_priv, prng_seed) = self.keys_manager.get_onion_rand();
-
let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let mut results = Vec::new();
+ 'path_loop: for path in route.paths.iter() {
+ macro_rules! check_res_push {
+ ($res: expr) => { match $res {
+ Ok(r) => r,
+ Err(e) => {
+ results.push(Err(e));
+ continue 'path_loop;
+ },
+ }
+ }
+ }
- let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
- APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
- let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height)?;
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash);
+ log_trace!(self, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
+ let (session_priv, prng_seed) = self.keys_manager.get_onion_rand();
- let _ = self.total_consistency_lock.read().unwrap();
+ let onion_keys = check_res_push!(onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
+ .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"}));
+ let (onion_payloads, htlc_msat, htlc_cltv) = check_res_push!(onion_utils::build_onion_payloads(&path, total_value, payment_secret, cur_height));
+ if onion_utils::route_size_insane(&onion_payloads) {
+ check_res_push!(Err(APIError::RouteError{err: "Route had too large size once"}));
+ }
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash);
- let mut channel_lock = self.channel_state.lock().unwrap();
- let err: Result<(), _> = loop {
+ let _ = self.total_consistency_lock.read().unwrap();
- let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
- None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
- Some(id) => id.clone(),
- };
+ let err: Result<(), _> = loop {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
+ None => check_res_push!(Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"})),
+ Some(id) => id.clone(),
+ };
- let channel_state = &mut *channel_lock;
- if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
- match {
- if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey {
- return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
- }
- if !chan.get().is_live() {
- return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
- }
- break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
- route: route.clone(),
- session_priv: session_priv.clone(),
- first_hop_htlc_msat: htlc_msat,
- }, onion_packet), channel_state, chan)
- } {
- Some((update_add, commitment_signed, chan_monitor)) => {
- if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
- // Note that MonitorUpdateFailed here indicates (per function docs)
- // that we will resent the commitment update once we unfree monitor
- // updating, so we have to take special care that we don't return
- // something else in case we will resend later!
- return Err(APIError::MonitorUpdateFailed);
+ let channel_state = &mut *channel_lock;
+ if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
+ match {
+ if chan.get().get_their_node_id() != path.first().unwrap().pubkey {
+ check_res_push!(Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}));
+ }
+ if !chan.get().is_live() {
+ check_res_push!(Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"}));
}
+ break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+ path: path.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ }, onion_packet), channel_state, chan)
+ } {
+ Some((update_add, commitment_signed, chan_monitor)) => {
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
+ // Note that MonitorUpdateFailed here indicates (per function docs)
+ // that we will resend the commitment update once we unfreeze monitor
+ // updating, so we have to take special care that we don't return
+ // something else in case we will resend later!
+ check_res_push!(Err(APIError::MonitorUpdateFailed));
+ }
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: route.hops.first().unwrap().pubkey,
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: vec![update_add],
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- },
- });
- },
- None => {},
- }
- } else { unreachable!(); }
- return Ok(());
- };
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: path.first().unwrap().pubkey,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: vec![update_add],
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ },
+ });
+ },
+ None => {},
+ }
+ } else { unreachable!(); }
+ results.push(Ok(()));
+ continue 'path_loop;
+ };
- match handle_error!(self, err, route.hops.first().unwrap().pubkey, channel_lock) {
- Ok(_) => unreachable!(),
- Err(e) => { Err(APIError::ChannelUnavailable { err: e.err }) }
+ match handle_error!(self, err, path.first().unwrap().pubkey) {
+ Ok(_) => unreachable!(),
+ Err(e) => {
+ check_res_push!(Err(APIError::ChannelUnavailable { err: e.err }));
+ },
+ }
+ }
+ let mut has_ok = false;
+ let mut has_err = false;
+ for res in results.iter() {
+ if res.is_ok() { has_ok = true; }
+ if res.is_err() { has_err = true; }
+ if let &Err(APIError::MonitorUpdateFailed) = res {
+ // MonitorUpdateFailed is inherently unsafe to retry, so we call it a
+ // PartialFailure.
+ has_err = true;
+ has_ok = true;
+ break;
+ }
+ }
+ if has_err && has_ok {
+ Err(PaymentSendFailure::PartialFailure(results))
+ } else if has_err {
+ Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
+ } else {
+ Ok(())
}
}
let _ = self.total_consistency_lock.read().unwrap();
let (mut chan, msg, chan_monitor) = {
- let mut channel_state = self.channel_state.lock().unwrap();
- let (res, chan) = match channel_state.by_id.remove(temporary_channel_id) {
+ let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
Some(mut chan) => {
(chan.get_outbound_funding_created(funding_txo)
.map_err(|e| if let ChannelError::Close(msg) = e {
},
None => return
};
- match handle_error!(self, res, chan.get_their_node_id(), channel_state) {
+ match handle_error!(self, res, chan.get_their_node_id()) {
Ok(funding_msg) => {
(chan, funding_msg.0, funding_msg.1)
},
if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
match e {
ChannelMonitorUpdateErr::PermanentFailure => {
- {
- let mut channel_state = self.channel_state.lock().unwrap();
- match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None)), chan.get_their_node_id(), channel_state) {
- Err(_) => { return; },
- Ok(()) => unreachable!(),
- }
+ match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None)), chan.get_their_node_id()) {
+ Err(_) => { return; },
+ Ok(()) => unreachable!(),
}
},
ChannelMonitorUpdateErr::TemporaryFailure => {
})
}
+ /// Generates a signed node_announcement from the given arguments and creates a
+ /// BroadcastNodeAnnouncement event.
+ ///
+/// RGB is a node "color" and alias is a printable human-readable string to describe this node
+/// to humans. They carry no in-protocol meaning.
+ ///
+ /// addresses represent the set (possibly empty) of socket addresses on which this node accepts
+ /// incoming connections.
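+ ///
+ /// A minimal usage sketch (alias bytes, color, and `addresses` are illustrative):
+ ///
+ /// ```ignore
+ /// // 32-byte alias, zero-padded; the RGB "color" carries no protocol meaning.
+ /// let mut alias = [0u8; 32];
+ /// alias[..7].copy_from_slice(b"my-node");
+ /// // `addresses` is a msgs::NetAddressSet (possibly empty); construction elided here.
+ /// channel_manager.broadcast_node_announcement([0x00, 0x2b, 0xff], alias, addresses);
+ /// ```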
+ pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: msgs::NetAddressSet) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ let announcement = msgs::UnsignedNodeAnnouncement {
+ features: NodeFeatures::supported(),
+ timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
+ node_id: self.get_our_node_id(),
+ rgb, alias,
+ addresses: addresses.to_vec(),
+ excess_address_data: Vec::new(),
+ excess_data: Vec::new(),
+ };
+ let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
+
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
+ msg: msgs::NodeAnnouncement {
+ signature: self.secp_ctx.sign(&msghash, &self.our_network_key),
+ contents: announcement
+ },
+ });
+ }
+
/// Processes HTLCs which are pending waiting on random forward delay.
///
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
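+ ///
+ /// A rough sketch of the expected call pattern (the event-loop shape is illustrative):
+ ///
+ /// ```ignore
+ /// if let Event::PendingHTLCsForwardable { time_forwardable } = event {
+ ///     // Wait at least time_forwardable (batching forwards resists timing analysis):
+ ///     sleep(time_forwardable);
+ ///     channel_manager.process_pending_htlc_forwards();
+ /// }
+ /// ```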
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
});
- failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
+ failed_forwards.push((htlc_source, forward_info.payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: Vec::new() }
+ ));
},
HTLCForwardInfo::FailHTLC { .. } => {
// Channel went away before we could fail it. This implies
let mut fail_htlc_msgs = Vec::new();
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
- log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
+ type_data: PendingForwardReceiveHTLCInfo::Forward {
+ onion_packet, ..
+ }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, } => {
+ log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+ incoming_packet_shared_secret: incoming_shared_secret,
});
- match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+ match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet) {
Err(e) => {
if let ChannelError::Ignore(msg) = e {
- log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
+ log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
} else {
panic!("Stated return value requirements in send_htlc() were not met");
}
let chan_update = self.get_channel_update(chan.get()).unwrap();
- failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() }
+ ));
continue;
},
Ok(update_add) => {
}
}
},
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+ },
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
},
ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
};
- match handle_error!(self, err, their_node_id, channel_state) {
- Ok(_) => unreachable!(),
- Err(_) => { continue; },
- }
+ handle_errors.push((their_node_id, err));
+ continue;
}
};
if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
} else {
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
+ type_data: PendingForwardReceiveHTLCInfo::Receive { payment_data, incoming_cltv_expiry },
+ incoming_shared_secret, payment_hash, amt_to_forward, .. }, } => {
let prev_hop_data = HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+ incoming_packet_shared_secret: incoming_shared_secret,
};
- match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
- hash_map::Entry::Occupied(mut entry) => entry.get_mut().push((forward_info.amt_to_forward, prev_hop_data)),
- hash_map::Entry::Vacant(entry) => { entry.insert(vec![(forward_info.amt_to_forward, prev_hop_data)]); },
- };
- new_events.push(events::Event::PaymentReceived {
- payment_hash: forward_info.payment_hash,
- amt: forward_info.amt_to_forward,
+
+ let mut total_value = 0;
+ let payment_secret = payment_data.as_ref().map(|data| data.payment_secret.clone());
+ let htlcs = channel_state.claimable_htlcs.entry((payment_hash, payment_secret))
+ .or_insert(Vec::new());
+ htlcs.push(ClaimableHTLC {
+ src: prev_hop_data,
+ value: amt_to_forward,
+ payment_data: payment_data.clone(),
+ cltv_expiry: incoming_cltv_expiry,
});
+ if let &Some(ref data) = &payment_data {
+ for htlc in htlcs.iter() {
+ total_value += htlc.value;
+ if htlc.payment_data.as_ref().unwrap().total_msat != data.total_msat {
+ total_value = msgs::MAX_VALUE_MSAT;
+ }
+ if total_value >= msgs::MAX_VALUE_MSAT { break; }
+ }
+ if total_value >= msgs::MAX_VALUE_MSAT {
+ for htlc in htlcs.iter() {
+ failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: htlc.src.short_channel_id,
+ htlc_id: htlc.src.htlc_id,
+ incoming_packet_shared_secret: htlc.src.incoming_packet_shared_secret,
+ }), payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(htlc.value).to_vec() }
+ ));
+ }
+ } else if total_value >= data.total_msat {
+ new_events.push(events::Event::PaymentReceived {
+ payment_hash: payment_hash,
+ payment_secret: Some(data.payment_secret),
+ amt: total_value,
+ });
+ }
+ } else {
+ new_events.push(events::Event::PaymentReceived {
+ payment_hash: payment_hash,
+ payment_secret: None,
+ amt: amt_to_forward,
+ });
+ }
+ },
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
},
HTLCForwardInfo::FailHTLC { .. } => {
panic!("Got pending fail of our own HTLC");
}
}
- for (htlc_source, payment_hash, failure_code, update) in failed_forwards.drain(..) {
- match update {
- None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: Vec::new() }),
- Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: chan_update.encode_with_len() }),
- };
+ for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
}
- if handle_errors.len() > 0 {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- for (their_node_id, err) in handle_errors.drain(..) {
- let _ = handle_error!(self, err, their_node_id, channel_state_lock);
- }
+ for (their_node_id, err) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, their_node_id);
}
if new_events.is_empty() { return }
/// along the path (including in our own channel on which we received it).
/// Returns false if no payment was found to fail backwards, true if the process of failing the
/// HTLC backwards has been started.
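+ ///
+ /// A minimal call sketch (both values come from the corresponding PaymentReceived event):
+ ///
+ /// ```ignore
+ /// let failed = channel_manager.fail_htlc_backwards(&payment_hash, &payment_secret);
+ /// ```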
- pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
+ pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, payment_secret: &Option<[u8; 32]>) -> bool {
let _ = self.total_consistency_lock.read().unwrap();
let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
+ let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(*payment_hash, *payment_secret));
if let Some(mut sources) = removed_source {
- for (recvd_value, htlc_with_hash) in sources.drain(..) {
+ for htlc in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
- HTLCSource::PreviousHopData(htlc_with_hash), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(recvd_value).to_vec() });
+ HTLCSource::PreviousHopData(htlc.src), payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(htlc.value).to_vec() });
}
true
} else { false }
//between the branches here. We should make this async and move it into the forward HTLCs
//timer handling.
match source {
- HTLCSource::OutboundRoute { ref route, .. } => {
+ HTLCSource::OutboundRoute { ref path, .. } => {
log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
mem::drop(channel_state_lock);
match &onion_error {
self.pending_events.lock().unwrap().push(
events::Event::PaymentFailed {
payment_hash: payment_hash.clone(),
- rejected_by_dest: route.hops.len() == 1,
+ rejected_by_dest: path.len() == 1,
#[cfg(test)]
error_code: Some(*failure_code),
}
/// motivated attackers.
///
/// May panic if called except in response to a PaymentReceived event.
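+ ///
+ /// A rough sketch, assuming you track preimages yourself (`preimage_store` is illustrative):
+ ///
+ /// ```ignore
+ /// if let Event::PaymentReceived { payment_hash, payment_secret, amt } = event {
+ ///     if let Some(preimage) = preimage_store.get(&payment_hash) {
+ ///         channel_manager.claim_funds(*preimage, &payment_secret, amt);
+ ///     }
+ /// }
+ /// ```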
- pub fn claim_funds(&self, payment_preimage: PaymentPreimage, expected_amount: u64) -> bool {
+ pub fn claim_funds(&self, payment_preimage: PaymentPreimage, payment_secret: &Option<[u8; 32]>, expected_amount: u64) -> bool {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
let _ = self.total_consistency_lock.read().unwrap();
let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
+ let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(payment_hash, *payment_secret));
if let Some(mut sources) = removed_source {
- for (received_amount, htlc_with_hash) in sources.drain(..) {
+ assert!(!sources.is_empty());
+ let passes_value = if let &Some(ref data) = &sources[0].payment_data {
+ assert!(payment_secret.is_some());
+ data.total_msat == expected_amount
+ } else {
+ assert!(payment_secret.is_none());
+ false
+ };
+
+ let mut one_claimed = false;
+ for htlc in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- if received_amount < expected_amount || received_amount > expected_amount * 2 {
- let mut htlc_msat_data = byte_utils::be64_to_array(received_amount).to_vec();
+ if !passes_value && (htlc.value < expected_amount || htlc.value > expected_amount * 2) {
+ let mut htlc_msat_data = byte_utils::be64_to_array(htlc.value).to_vec();
let mut height_data = byte_utils::be32_to_array(self.latest_block_height.load(Ordering::Acquire) as u32).to_vec();
htlc_msat_data.append(&mut height_data);
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
- HTLCSource::PreviousHopData(htlc_with_hash), &payment_hash,
+ HTLCSource::PreviousHopData(htlc.src), &payment_hash,
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_data });
} else {
- self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
+ self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc.src), payment_preimage);
+ one_claimed = true;
}
}
- true
+ one_claimed
} else { false }
}
fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<ChanSigner>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
return;
};
- let _ = handle_error!(self, err, their_node_id, channel_state_lock);
+ mem::drop(channel_state_lock);
+ let _ = handle_error!(self, err, their_node_id);
}
/// Gets the node_id held by this ChannelManager
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
- if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
+ if let PendingHTLCStatus::Forward(PendingHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
let chan_update = self.get_channel_update(chan.get());
pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
}
#[inline]
- fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingHTLCInfo, u64)>)]) {
for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut forward_event = None;
if !pending_forwards.is_empty() {
forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
}
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
- match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
+ match channel_state.forward_htlcs.entry(match forward_info.type_data {
+ PendingForwardReceiveHTLCInfo::Forward { short_channel_id, .. } => short_channel_id,
+ PendingForwardReceiveHTLCInfo::Receive { .. } => 0,
+ }) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info });
},
#[doc(hidden)]
pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
let _ = self.total_consistency_lock.read().unwrap();
- let mut channel_state_lock = self.channel_state.lock().unwrap();
let their_node_id;
let err: Result<(), _> = loop {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(channel_id) {
return Ok(())
};
- match handle_error!(self, err, their_node_id, channel_state_lock) {
+ match handle_error!(self, err, their_node_id) {
Ok(_) => unreachable!(),
Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
}
}
}
-impl<ChanSigner: ChannelKeys> events::MessageSendEventsProvider for ChannelManager<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor {
fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
// TODO: Event release to users and serialization is currently race-y: it's very easy for a
// user to serialize a ChannelManager with pending events in it and lose those events on
}
}
-impl<ChanSigner: ChannelKeys> events::EventsProvider for ChannelManager<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref> events::EventsProvider for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor {
fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
// TODO: Event release to users and serialization is currently race-y: it's very easy for a
// user to serialize a ChannelManager with pending events in it and lose those events on
}
}
-impl<ChanSigner: ChannelKeys> ChainListener for ChannelManager<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send> ChainListener for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor {
fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
let header_hash = header.bitcoin_hash();
log_trace!(self, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len());
let _ = self.total_consistency_lock.read().unwrap();
let mut failed_channels = Vec::new();
+ let mut timed_out_htlcs = Vec::new();
{
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, channel| {
- let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
- if let Ok(Some(funding_locked)) = chan_res {
- pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
- node_id: channel.get_their_node_id(),
- msg: funding_locked,
- });
- if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
- pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ let res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
+ if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
+ timed_out_htlcs.append(&mut timed_out_pending_htlcs);
+ if let Some(funding_locked) = chan_res {
+ pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
node_id: channel.get_their_node_id(),
- msg: announcement_sigs,
+ msg: funding_locked,
});
+ if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
+ pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: channel.get_their_node_id(),
+ msg: announcement_sigs,
+ });
+ }
+ short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
}
- short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
- } else if let Err(e) = chan_res {
+ } else if let Err(e) = res {
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_their_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
}
true
});
+
+ channel_state.claimable_htlcs.retain(|&(ref payment_hash, _), htlcs| {
+ htlcs.retain(|htlc| {
+ if height >= htlc.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS {
+ timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.src.clone()), payment_hash.clone(), htlc.value));
+ false
+ } else { true }
+ });
+ !htlcs.is_empty()
+ });
}
for failure in failed_channels.drain(..) {
self.finish_force_close_channel(failure);
}
+
+ for (source, payment_hash, value) in timed_out_htlcs.drain(..) {
+ // Fail these back with a permanent failure (0x4000 | 15): ultimately, the issue
+ // is that the user failed to provide us a preimage within the cltv_expiry time window.
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason {
+ failure_code: 0x4000 | 15,
+ data: byte_utils::be64_to_array(value).to_vec()
+ });
+ }
self.latest_block_height.store(height as usize, Ordering::Release);
*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
}
}
}
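// Editor's note: the expiry test in block_connected above, restated as a
// standalone predicate. An HTLC is failed back once the chain tip is within
// CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS blocks of its cltv_expiry,
// leaving headroom to claim on-chain before the timeout path matures. This
// sketch assumes cltv_expiry exceeds the sum of the two buffers.
fn htlc_timed_out(best_height: u32, cltv_expiry: u32) -> bool {
	best_height >= cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS
}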
-impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send> ChannelMessageHandler for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor {
fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_open_channel(their_node_id, their_features, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_open_channel(their_node_id, their_features, msg), *their_node_id);
}
fn handle_accept_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_accept_channel(their_node_id, their_features, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_accept_channel(their_node_id, their_features, msg), *their_node_id);
}
fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_funding_created(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_funding_created(their_node_id, msg), *their_node_id);
}
fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_funding_signed(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_funding_signed(their_node_id, msg), *their_node_id);
}
fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_funding_locked(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_funding_locked(their_node_id, msg), *their_node_id);
}
fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_shutdown(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_shutdown(their_node_id, msg), *their_node_id);
}
fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_closing_signed(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_closing_signed(their_node_id, msg), *their_node_id);
}
fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_add_htlc(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), *their_node_id);
}
fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_fulfill_htlc(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), *their_node_id);
}
fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_fail_htlc(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), *their_node_id);
}
fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_fail_malformed_htlc(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), *their_node_id);
}
fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_commitment_signed(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_commitment_signed(their_node_id, msg), *their_node_id);
}
fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_revoke_and_ack(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), *their_node_id);
}
fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_fee(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_fee(their_node_id, msg), *their_node_id);
}
fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_announcement_signatures(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), *their_node_id);
}
fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_channel_reestablish(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), *their_node_id);
}
fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
let _ = self.total_consistency_lock.read().unwrap();
let mut failed_channels = Vec::new();
let mut failed_payments = Vec::new();
+ let mut no_channels_remain = true;
{
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
short_to_id.remove(&short_id);
}
return false;
+ } else {
+ no_channels_remain = false;
}
}
true
&events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+ &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
}
});
}
+ if no_channels_remain {
+ self.per_peer_state.write().unwrap().remove(their_node_id);
+ }
+
for failure in failed_channels.drain(..) {
self.finish_force_close_channel(failure);
}
}
}
- fn peer_connected(&self, their_node_id: &PublicKey) {
+ fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id));
let _ = self.total_consistency_lock.read().unwrap();
+
+ {
+ let mut peer_state_lock = self.per_peer_state.write().unwrap();
+ match peer_state_lock.entry(their_node_id.clone()) {
+ hash_map::Entry::Vacant(e) => {
+ e.insert(Mutex::new(PeerState {
+ latest_features: init_msg.features.clone(),
+ }));
+ },
+ hash_map::Entry::Occupied(e) => {
+ e.get().lock().unwrap().latest_features = init_msg.features.clone();
+ },
+ }
+ }
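// Editor's note: latest_features is upserted on every reconnection so it always
// reflects the peer's most recent Init; the entry is only removed again in
// peer_disconnected once no channels with the peer remain.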
+
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
-impl Writeable for PendingForwardHTLCInfo {
+impl Writeable for PendingHTLCInfo {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- self.onion_packet.write(writer)?;
+ match &self.type_data {
+ &PendingForwardReceiveHTLCInfo::Forward { ref onion_packet, ref short_channel_id } => {
+ 0u8.write(writer)?;
+ onion_packet.write(writer)?;
+ short_channel_id.write(writer)?;
+ },
+ &PendingForwardReceiveHTLCInfo::Receive { ref payment_data, ref incoming_cltv_expiry } => {
+ 1u8.write(writer)?;
+ payment_data.write(writer)?;
+ incoming_cltv_expiry.write(writer)?;
+ },
+ }
self.incoming_shared_secret.write(writer)?;
self.payment_hash.write(writer)?;
- self.short_channel_id.write(writer)?;
self.amt_to_forward.write(writer)?;
self.outgoing_cltv_value.write(writer)?;
Ok(())
}
}
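// Editor's sketch: the 0u8/1u8 discriminants written above must stay in
// lockstep with the Readable impl below. A hypothetical round-trip check,
// assuming util::ser's Writeable::encode() helper:
#[cfg(test)]
fn pending_htlc_info_roundtrip(info: &PendingHTLCInfo) -> PendingHTLCInfo {
	let encoded = info.encode();
	Readable::read(&mut Cursor::new(encoded)).expect("encode/read must round-trip")
}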
-impl<R: ::std::io::Read> Readable<R> for PendingForwardHTLCInfo {
- fn read(reader: &mut R) -> Result<PendingForwardHTLCInfo, DecodeError> {
- Ok(PendingForwardHTLCInfo {
- onion_packet: Readable::read(reader)?,
+impl<R: ::std::io::Read> Readable<R> for PendingHTLCInfo {
+ fn read(reader: &mut R) -> Result<PendingHTLCInfo, DecodeError> {
+ Ok(PendingHTLCInfo {
+ type_data: match Readable::read(reader)? {
+ 0u8 => PendingForwardReceiveHTLCInfo::Forward {
+ onion_packet: Readable::read(reader)?,
+ short_channel_id: Readable::read(reader)?,
+ },
+ 1u8 => PendingForwardReceiveHTLCInfo::Receive {
+ payment_data: Readable::read(reader)?,
+ incoming_cltv_expiry: Readable::read(reader)?,
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ },
incoming_shared_secret: Readable::read(reader)?,
payment_hash: Readable::read(reader)?,
- short_channel_id: Readable::read(reader)?,
amt_to_forward: Readable::read(reader)?,
outgoing_cltv_value: Readable::read(reader)?,
})
0u8.write(writer)?;
hop_data.write(writer)?;
},
- &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } => {
+ &HTLCSource::OutboundRoute { ref path, ref session_priv, ref first_hop_htlc_msat } => {
1u8.write(writer)?;
- route.write(writer)?;
+ path.write(writer)?;
session_priv.write(writer)?;
first_hop_htlc_msat.write(writer)?;
}
match <u8 as Readable<R>>::read(reader)? {
0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
1 => Ok(HTLCSource::OutboundRoute {
- route: Readable::read(reader)?,
+ path: Readable::read(reader)?,
session_priv: Readable::read(reader)?,
first_hop_htlc_msat: Readable::read(reader)?,
}),
}
}
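// Editor's note (hedged reading of the rename above): OutboundRoute now
// serializes a single `path` (the hops this HTLC actually traversed) rather
// than a whole Route, so each HTLC carries only the state needed to decode
// its own errors.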
-impl<ChanSigner: ChannelKeys + Writeable> Writeable for ChannelManager<ChanSigner> {
+impl<ChanSigner: ChannelKeys + Writeable, M: Deref> Writeable for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
let _ = self.total_consistency_lock.write().unwrap();
for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
payment_hash.write(writer)?;
(previous_hops.len() as u64).write(writer)?;
- for &(recvd_amt, ref previous_hop) in previous_hops.iter() {
- recvd_amt.write(writer)?;
- previous_hop.write(writer)?;
+ for htlc in previous_hops.iter() {
+ htlc.src.write(writer)?;
+ htlc.value.write(writer)?;
+ htlc.payment_data.write(writer)?;
+ htlc.cltv_expiry.write(writer)?;
}
}
+ let per_peer_state = self.per_peer_state.write().unwrap();
+ (per_peer_state.len() as u64).write(writer)?;
+ for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
+ peer_pubkey.write(writer)?;
+ let peer_state = peer_state_mutex.lock().unwrap();
+ peer_state.latest_features.write(writer)?;
+ }
+
+ (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
+
Ok(())
}
}
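// Editor's note: the field order above is the wire format (claimable_htlcs,
// then per_peer_state, then last_node_announcement_serial) and must match the
// read order in the ReadableArgs impl below exactly.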
/// 5) Move the ChannelMonitors into your local ManyChannelMonitor.
/// 6) Disconnect/connect blocks on the ChannelManager.
/// 7) Register the new ChannelManager with your ChainWatchInterface.
-pub struct ChannelManagerReadArgs<'a, ChanSigner: ChannelKeys> {
+pub struct ChannelManagerReadArgs<'a, ChanSigner: ChannelKeys, M: Deref> where M::Target: ManyChannelMonitor {
/// The keys provider which will give us relevant keys. Some keys will be loaded during
/// deserialization.
pub keys_manager: Arc<KeysInterface<ChanKeySigner = ChanSigner>>,
/// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that
/// you have deserialized ChannelMonitors separately and will add them to your
/// ManyChannelMonitor after deserializing this ChannelManager.
- pub monitor: Arc<ManyChannelMonitor>,
+ pub monitor: M,
/// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
/// used to broadcast the latest local commitment transactions of channels which must be
/// force-closed during deserialization.
pub channel_monitors: &'a mut HashMap<OutPoint, &'a mut ChannelMonitor>,
}
-impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R, ChannelManagerReadArgs<'a, ChanSigner>> for (Sha256dHash, ChannelManager<ChanSigner>) {
- fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner>) -> Result<Self, DecodeError> {
+// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
+// SimpleArcChannelManager type:
+impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>, M: Deref> ReadableArgs<R, ChannelManagerReadArgs<'a, ChanSigner, M>> for (Sha256dHash, Arc<ChannelManager<ChanSigner, M>>) where M::Target: ManyChannelMonitor {
+ fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M>) -> Result<Self, DecodeError> {
+ let (blockhash, chan_manager) = <(Sha256dHash, ChannelManager<ChanSigner, M>)>::read(reader, args)?;
+ Ok((blockhash, Arc::new(chan_manager)))
+ }
+}
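// Editor's sketch: a thin wrapper showing how the Arc'd impl above is invoked;
// the generic bounds mirror that impl and nothing beyond it is assumed.
fn read_arc_manager<'a, R: ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>, M: Deref>(
	reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M>
) -> Result<(Sha256dHash, Arc<ChannelManager<ChanSigner, M>>), DecodeError> where M::Target: ManyChannelMonitor {
	ReadableArgs::read(reader, args)
}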
+
+impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>, M: Deref> ReadableArgs<R, ChannelManagerReadArgs<'a, ChanSigner, M>> for (Sha256dHash, ChannelManager<ChanSigner, M>) where M::Target: ManyChannelMonitor {
+ fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M>) -> Result<Self, DecodeError> {
let _ver: u8 = Readable::read(reader)?;
let min_ver: u8 = Readable::read(reader)?;
if min_ver > SERIALIZATION_VERSION {
let previous_hops_len: u64 = Readable::read(reader)?;
let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
for _ in 0..previous_hops_len {
- previous_hops.push((Readable::read(reader)?, Readable::read(reader)?));
+ previous_hops.push(ClaimableHTLC {
+ src: Readable::read(reader)?,
+ value: Readable::read(reader)?,
+ payment_data: Readable::read(reader)?,
+ cltv_expiry: Readable::read(reader)?,
+ });
}
claimable_htlcs.insert(payment_hash, previous_hops);
}
+ let peer_count: u64 = Readable::read(reader)?;
+ let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, 128));
+ for _ in 0..peer_count {
+ let peer_pubkey = Readable::read(reader)?;
+ let peer_state = PeerState {
+ latest_features: Readable::read(reader)?,
+ };
+ per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
+ }
+
+ let last_node_announcement_serial: u32 = Readable::read(reader)?;
+
let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: args.fee_estimator,
}),
our_network_key: args.keys_manager.get_node_secret(),
+ last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
+
+ per_peer_state: RwLock::new(per_peer_state),
+
pending_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
keys_manager: args.keys_manager,