channel_capacity: None,
}
}
+
+ /// Returns `true` if this error carries a `chan_id`. The `handle_*` message
+ /// handlers treat such errors as channel-closing and force a persist in that
+ /// case.
+ fn closes_channel(&self) -> bool {
+ self.chan_id.is_some()
+ }
}
/// We hold back HTLCs we intend to relay for a random interval greater than this (see
background_events_processed_since_startup: AtomicBool,
- persistence_notifier: Notifier,
+ event_persist_notifier: Notifier,
+ needs_persist_flag: AtomicBool,
entropy_source: ES,
node_signer: NS,
#[must_use]
enum NotifyOption {
DoPersist,
- SkipPersist,
+ // No persistence needed, but there may be events for the user to process, so
+ // the event notifier is still woken when the guard drops.
+ SkipPersistHandleEvents,
+ // Nothing changed: neither persistence nor an event-notifier wakeup is needed.
+ SkipPersistNoEvents,
}
/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
/// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
/// notify or not based on whether relevant changes have been made, providing a closure to
/// `optionally_notify` which returns a `NotifyOption`.
-struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
- persistence_notifier: &'a Notifier,
+struct PersistenceNotifierGuard<'a, F: FnMut() -> NotifyOption> {
+ // Woken on drop whenever `should_persist` returns anything other than
+ // `SkipPersistNoEvents`, i.e. when there is persistence or event work to do.
+ event_persist_notifier: &'a Notifier,
+ // Set to `true` on drop when `should_persist` returns `DoPersist`.
+ needs_persist_flag: &'a AtomicBool,
should_persist: F,
// We hold onto this result so the lock doesn't get released immediately.
_read_guard: RwLockReadGuard<'a, ()>,
}
impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
- fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+ /// Notifies any waiters and indicates that we need to persist, in addition to possibly having
+ /// events to handle.
+ ///
+ /// This must always be called if the changes included a `ChannelMonitorUpdate`, as well as in
+ /// other cases where losing the changes on restart may result in a force-close or otherwise
+ /// isn't ideal.
+ fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
+ Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
+ }
+
+ /// Like [`Self::notify_on_drop`] but the caller supplies `persist_check`; the final decision
+ /// taken on drop is the "stronger" of `persist_check`'s result and whatever
+ /// `process_background_events` returned.
+ fn optionally_notify<F: FnMut() -> NotifyOption, C: AChannelManager>(cm: &'a C, mut persist_check: F)
+ -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
- let _ = cm.get_cm().process_background_events(); // We always persist
+ let force_notify = cm.get_cm().process_background_events();
PersistenceNotifierGuard {
- persistence_notifier: &cm.get_cm().persistence_notifier,
- should_persist: || -> NotifyOption { NotifyOption::DoPersist },
+ event_persist_notifier: &cm.get_cm().event_persist_notifier,
+ needs_persist_flag: &cm.get_cm().needs_persist_flag,
+ should_persist: move || {
+ // Pick the "most" action between `persist_check` and the background events
+ // processing and return that.
+ let notify = persist_check();
+ match (notify, force_notify) {
+ (NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
+ (_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
+ (NotifyOption::SkipPersistHandleEvents, _) => NotifyOption::SkipPersistHandleEvents,
+ (_, NotifyOption::SkipPersistHandleEvents) => NotifyOption::SkipPersistHandleEvents,
+ _ => NotifyOption::SkipPersistNoEvents,
+ }
+ },
_read_guard: read_guard,
}
-
}
/// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
- /// [`ChannelManager::process_background_events`] MUST be called first.
- fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
- let read_guard = lock.read().unwrap();
+ /// [`ChannelManager::process_background_events`] MUST be called first (or
+ /// [`Self::optionally_notify`] used).
+ fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>
+ (cm: &'a C, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
+ let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
PersistenceNotifierGuard {
- persistence_notifier: notifier,
+ event_persist_notifier: &cm.get_cm().event_persist_notifier,
+ needs_persist_flag: &cm.get_cm().needs_persist_flag,
should_persist: persist_check,
_read_guard: read_guard,
}
}
}
-impl<'a, F: Fn() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
+impl<'a, F: FnMut() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
fn drop(&mut self) {
- if (self.should_persist)() == NotifyOption::DoPersist {
- self.persistence_notifier.notify();
match (self.should_persist)() {
NotifyOption::DoPersist => {
// Release-store pairs with the AcqRel swap in
// `get_and_clear_needs_persistence`.
self.needs_persist_flag.store(true, Ordering::Release);
self.event_persist_notifier.notify()
},
NotifyOption::SkipPersistHandleEvents =>
// Events only: wake waiters without flagging a persistence need.
self.event_persist_notifier.notify(),
NotifyOption::SkipPersistNoEvents => {},
}
}
}
return;
}
- let mut result = NotifyOption::SkipPersist;
+ let mut result;
{
// We'll acquire our total consistency lock so that we can be sure no other
// Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
// ensure any startup-generated background events are handled first.
- if $self.process_background_events() == NotifyOption::DoPersist { result = NotifyOption::DoPersist; }
+ result = $self.process_background_events();
// TODO: This behavior should be documented. It's unintuitive that we query
// ChannelMonitors when clearing other events.
processed_all_events = false;
}
- if result == NotifyOption::DoPersist {
- $self.persistence_notifier.notify();
+ match result {
+ NotifyOption::DoPersist => {
+ $self.needs_persist_flag.store(true, Ordering::Release);
+ $self.event_persist_notifier.notify();
+ },
+ NotifyOption::SkipPersistHandleEvents =>
+ $self.event_persist_notifier.notify(),
+ NotifyOption::SkipPersistNoEvents => {},
}
}
}
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
- persistence_notifier: Notifier::new(),
+
+ event_persist_notifier: Notifier::new(),
+ needs_persist_flag: AtomicBool::new(false),
entropy_source,
node_signer,
let (short_channel_id, amt_to_forward, outgoing_cltv_value) = match hop_data {
msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
(short_channel_id, amt_to_forward, outgoing_cltv_value),
- msgs::InboundOnionPayload::Receive { .. } =>
+ msgs::InboundOnionPayload::Receive { .. } | msgs::InboundOnionPayload::BlindedReceive { .. } =>
return Err(InboundOnionErr {
msg: "Final Node OnionHopData provided for us as an intermediary node",
err_code: 0x4000 | 22,
payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
} =>
(payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata),
- _ =>
+ msgs::InboundOnionPayload::BlindedReceive {
+ amt_msat, total_msat, outgoing_cltv_value, payment_secret, ..
+ } => {
+ let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
+ (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None)
+ }
+ msgs::InboundOnionPayload::Forward { .. } => {
return Err(InboundOnionErr {
err_code: 0x4000|22,
err_data: Vec::new(),
msg: "Got non final data with an HMAC of 0",
- }),
+ })
+ },
};
// final_incorrect_cltv_expiry
if outgoing_cltv_value > cltv_expiry {
}
}
- let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
+ let next_hop = match onion_utils::decode_next_payment_hop(
+ shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac,
+ msg.payment_hash, &self.node_signer
+ ) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
return_malformed_err!(err_msg, err_code);
// We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
// inbound channel's state.
onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
- onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } => {
+ onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } |
+ onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::BlindedReceive { .. }, .. } =>
+ {
return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
}
};
let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
- let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+ let next_hop = match onion_utils::decode_next_payment_hop(
+ phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
+ payment_hash, &self.node_signer
+ ) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
let mut background_events = Vec::new();
mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
if background_events.is_empty() {
- return NotifyOption::SkipPersist;
+ return NotifyOption::SkipPersistNoEvents;
}
for event in background_events.drain(..) {
}
fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
- if !chan.context.is_outbound() { return NotifyOption::SkipPersist; }
+ if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
- &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- return NotifyOption::SkipPersist;
+ chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+ return NotifyOption::SkipPersistNoEvents;
}
if !chan.context.is_live() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
- &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- return NotifyOption::SkipPersist;
+ chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+ return NotifyOption::SkipPersistNoEvents;
}
log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
&chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
/// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what
/// it wants to detect). Thus, we have a variant exposed here for its benefit.
pub fn maybe_update_chan_fees(&self) {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut should_persist = self.process_background_events();
+ PersistenceNotifierGuard::optionally_notify(self, || {
+ let mut should_persist = NotifyOption::SkipPersistNoEvents;
let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
/// [`ChannelUpdate`]: msgs::ChannelUpdate
/// [`ChannelConfig`]: crate::util::config::ChannelConfig
pub fn timer_tick_occurred(&self) {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut should_persist = self.process_background_events();
+ PersistenceNotifierGuard::optionally_notify(self, || {
+ let mut should_persist = NotifyOption::SkipPersistNoEvents;
let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
+ // likely to be lost on restart!
if msg.chain_hash != self.genesis_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
}
}
fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
+ // likely to be lost on restart!
let (value, output_script, user_id) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
}
fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+ // closing a channel), so any changes are likely to be lost on restart!
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
//but we should prevent it anyway.
+ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+ // closing a channel), so any changes are likely to be lost on restart!
+
let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
}
fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+ // closing a channel), so any changes are likely to be lost on restart!
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
}
fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+ // closing a channel), so any changes are likely to be lost on restart!
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
Ok(())
}
- /// Returns ShouldPersist if anything changed, otherwise either SkipPersist or an Err.
+ /// Returns DoPersist if anything changed, otherwise either SkipPersistNoEvents or an Err.
fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
None => {
// It's not a local channel
- return Ok(NotifyOption::SkipPersist)
+ return Ok(NotifyOption::SkipPersistNoEvents)
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
if peer_state_mutex_opt.is_none() {
- return Ok(NotifyOption::SkipPersist)
+ return Ok(NotifyOption::SkipPersistNoEvents)
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
// If the announcement is about a channel of ours which is public, some
// other peer may simply be forwarding all its gossip to us. Don't provide
// a scary-looking error message and return Ok instead.
- return Ok(NotifyOption::SkipPersist);
+ return Ok(NotifyOption::SkipPersistNoEvents);
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
}
let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
let msg_from_node_one = msg.contents.flags & 1 == 0;
if were_node_one == msg_from_node_one {
- return Ok(NotifyOption::SkipPersist);
+ return Ok(NotifyOption::SkipPersistNoEvents);
} else {
log_debug!(self.logger, "Received channel_update for channel {}.", chan_id);
try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
"Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
}
},
- hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
+ hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
}
Ok(NotifyOption::DoPersist)
}
- fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
+ fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
let htlc_forwards;
let need_lnd_workaround = {
let per_peer_state = self.per_peer_state.read().unwrap();
}
};
+ let mut persist = NotifyOption::SkipPersistHandleEvents;
if let Some(forwards) = htlc_forwards {
self.forward_htlcs(&mut [forwards][..]);
+ persist = NotifyOption::DoPersist;
}
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
}
- Ok(())
+ Ok(persist)
}
/// Process pending events from the [`chain::Watch`], returning whether any events were processed.
/// the `MessageSendEvent`s to the specific peer they were generated under.
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let events = RefCell::new(Vec::new());
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut result = self.process_background_events();
+ PersistenceNotifierGuard::optionally_notify(self, || {
+ let mut result = NotifyOption::SkipPersistNoEvents;
// TODO: This behavior should be documented. It's unintuitive that we query
// ChannelMonitors when clearing other events.
}
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
- let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
- &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+ let _persistence_guard =
+ PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+ self, || -> NotifyOption { NotifyOption::DoPersist });
let new_height = height - 1;
{
let mut best_block = self.best_block.write().unwrap();
let block_hash = header.block_hash();
log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
- &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+ let _persistence_guard =
+ PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+ self, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
.map(|(a, b)| (a, Vec::new(), b)));
let block_hash = header.block_hash();
log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
- &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+ let _persistence_guard =
+ PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+ self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
}
fn transaction_unconfirmed(&self, txid: &Txid) {
- let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
- &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+ let _persistence_guard =
+ PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+ self, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(None, |channel| {
if let Some(funding_txo) = channel.context.get_funding_txo() {
if funding_txo.txid == *txid {
}
}
- /// Gets a [`Future`] that completes when this [`ChannelManager`] needs to be persisted.
+ /// Gets a [`Future`] that completes when this [`ChannelManager`] may need to be persisted or
+ /// may have events that need processing.
+ ///
+ /// In order to check if this [`ChannelManager`] needs persisting, call
+ /// [`Self::get_and_clear_needs_persistence`].
///
/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
/// [`ChannelManager`] and should instead register actions to be taken later.
-
- pub fn get_persistable_update_future(&self) -> Future {
- self.persistence_notifier.get_future()
+ pub fn get_event_or_persistence_needed_future(&self) -> Future {
+ self.event_persist_notifier.get_future()
+ }
+
+ /// Returns true if this [`ChannelManager`] needs to be persisted.
+ ///
+ /// Clears the flag before returning (AcqRel swap), so repeated calls return `false`
+ /// until another persistence-requiring change is made.
+ pub fn get_and_clear_needs_persistence(&self) -> bool {
+ self.needs_persist_flag.swap(false, Ordering::AcqRel)
+ }
#[cfg(any(test, feature = "_test_utils"))]
- pub fn get_persistence_condvar_value(&self) -> bool {
- self.persistence_notifier.notify_pending()
+ /// Test-only: returns whether the event/persist notifier currently has a pending wakeup.
+ pub fn get_event_or_persist_condvar_value(&self) -> bool {
+ self.event_persist_notifier.notify_pending()
}
/// Gets the latest best block which was connected either via the [`chain::Listen`] or
L::Target: Logger,
{
fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // open_channel message - pre-funded channels are never written so there should be no
+ // change to the contents.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_open_channel(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => {
+ // Closing a brand-new (unfunded) channel should be impossible; persist
+ // defensively in release builds anyway.
+ debug_assert!(false, "We shouldn't close a new channel");
+ NotifyOption::DoPersist
+ },
+ _ => NotifyOption::SkipPersistHandleEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
}
fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // accept_channel message - pre-funded channels are never written so there should be no
+ // change to the contents.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
+ // NOTE(review): unlike the other handlers, errors here are not checked with
+ // closes_channel() - presumably an accept_channel error cannot close a funded
+ // channel; confirm against internal_accept_channel.
+ NotifyOption::SkipPersistHandleEvents
+ });
}
fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
}
fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // channel_ready message - while the channel's state will change, any channel_ready message
+ // will ultimately be re-sent on startup and the `ChannelMonitor` won't be updated so we
+ // will not force-close the channel on startup.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_channel_ready(counterparty_node_id, msg);
+ // Only a channel-closing error forces a persist; anything else may still have
+ // queued events to hand to the user.
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ _ => NotifyOption::SkipPersistHandleEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
}
fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // update_add_htlc message - the message itself doesn't change our channel state; only the
+ // `commitment_signed` message afterwards will.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_update_add_htlc(counterparty_node_id, msg);
+ // Channel-closing errors must be persisted; non-closing errors may queue events.
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(()) => NotifyOption::SkipPersistNoEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
}
fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // update_fail_htlc message - the message itself doesn't change our channel state; only the
+ // `commitment_signed` message afterwards will.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_update_fail_htlc(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(()) => NotifyOption::SkipPersistNoEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // update_fail_malformed_htlc message - the message itself doesn't change our channel state;
+ // only the `commitment_signed` message afterwards will.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_update_fail_malformed_htlc(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(()) => NotifyOption::SkipPersistNoEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
}
fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // update_fee message - the message itself doesn't change our channel state; only the
+ // `commitment_signed` message afterwards will.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_update_fee(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(()) => NotifyOption::SkipPersistNoEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
}
fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let force_persist = self.process_background_events();
+ PersistenceNotifierGuard::optionally_notify(self, || {
if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
- if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { persist }
+ persist
} else {
- NotifyOption::SkipPersist
+ // NOTE(review): on error we now persist (previously SkipPersist) -
+ // presumably because an Err may have closed a channel; confirm intent.
+ NotifyOption::DoPersist
}
});
}
fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_channel_reestablish(counterparty_node_id, msg);
+ // internal_channel_reestablish now returns the NotifyOption itself on success.
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(persist) => *persist,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
+ self, || NotifyOption::SkipPersistHandleEvents);
+
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
return Err(());
}
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ let mut res = Ok(());
- // If we have too many peers connected which don't have funded channels, disconnect the
- // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
- // unfunded channels taking up space in memory for disconnected peers, we still let new
- // peers connect, but we'll reject new channels from them.
- let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
- let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+ PersistenceNotifierGuard::optionally_notify(self, || {
+ // If we have too many peers connected which don't have funded channels, disconnect the
+ // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
+ // unfunded channels taking up space in memory for disconnected peers, we still let new
+ // peers connect, but we'll reject new channels from them.
+ let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
+ let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
- {
- let mut peer_state_lock = self.per_peer_state.write().unwrap();
- match peer_state_lock.entry(counterparty_node_id.clone()) {
- hash_map::Entry::Vacant(e) => {
- if inbound_peer_limited {
- return Err(());
- }
- e.insert(Mutex::new(PeerState {
- channel_by_id: HashMap::new(),
- inbound_channel_request_by_id: HashMap::new(),
- latest_features: init_msg.features.clone(),
- pending_msg_events: Vec::new(),
- in_flight_monitor_updates: BTreeMap::new(),
- monitor_update_blocked_actions: BTreeMap::new(),
- actions_blocking_raa_monitor_updates: BTreeMap::new(),
- is_connected: true,
- }));
- },
- hash_map::Entry::Occupied(e) => {
- let mut peer_state = e.get().lock().unwrap();
- peer_state.latest_features = init_msg.features.clone();
-
- let best_block_height = self.best_block.read().unwrap().height();
- if inbound_peer_limited &&
- Self::unfunded_channel_count(&*peer_state, best_block_height) ==
- peer_state.channel_by_id.len()
- {
- return Err(());
- }
+ {
+ let mut peer_state_lock = self.per_peer_state.write().unwrap();
+ match peer_state_lock.entry(counterparty_node_id.clone()) {
+ hash_map::Entry::Vacant(e) => {
+ if inbound_peer_limited {
+ res = Err(());
+ return NotifyOption::SkipPersistNoEvents;
+ }
+ e.insert(Mutex::new(PeerState {
+ channel_by_id: HashMap::new(),
+ inbound_channel_request_by_id: HashMap::new(),
+ latest_features: init_msg.features.clone(),
+ pending_msg_events: Vec::new(),
+ in_flight_monitor_updates: BTreeMap::new(),
+ monitor_update_blocked_actions: BTreeMap::new(),
+ actions_blocking_raa_monitor_updates: BTreeMap::new(),
+ is_connected: true,
+ }));
+ },
+ hash_map::Entry::Occupied(e) => {
+ let mut peer_state = e.get().lock().unwrap();
+ peer_state.latest_features = init_msg.features.clone();
+
+ let best_block_height = self.best_block.read().unwrap().height();
+ if inbound_peer_limited &&
+ Self::unfunded_channel_count(&*peer_state, best_block_height) ==
+ peer_state.channel_by_id.len()
+ {
+ res = Err(());
+ return NotifyOption::SkipPersistNoEvents;
+ }
- debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
- peer_state.is_connected = true;
- },
+ debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
+ peer_state.is_connected = true;
+ },
+ }
}
- }
- log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+ log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
- let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let pending_msg_events = &mut peer_state.pending_msg_events;
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
- if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
- // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
- // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
- // worry about closing and removing them.
- debug_assert!(false);
- None
- }
- ).for_each(|chan| {
- pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
- node_id: chan.context.get_counterparty_node_id(),
- msg: chan.get_channel_reestablish(&self.logger),
+ peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
+ if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
+ // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+ // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
+ // worry about closing and removing them.
+ debug_assert!(false);
+ None
+ }
+ ).for_each(|chan| {
+ pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_channel_reestablish(&self.logger),
+ });
});
- });
- }
- //TODO: Also re-broadcast announcement_signatures
- Ok(())
+ }
+
+			//TODO: Also re-broadcast announcement_signatures
+			return NotifyOption::SkipPersistHandleEvents;
+ });
+ res
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
- persistence_notifier: Notifier::new(),
+
+ event_persist_notifier: Notifier::new(),
+ needs_persist_flag: AtomicBool::new(false),
entropy_source: args.entropy_source,
node_signer: args.node_signer,
// All nodes start with a persistable update pending as `create_network` connects each node
// with all other nodes to make most tests simpler.
- assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
- assert!(nodes[2].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
&nodes[0].node.get_our_node_id()).pop().unwrap();
// The first two nodes (which opened a channel) should now require fresh persistence
- assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
// ... but the last node should not.
- assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
// After persisting the first two nodes they should no longer need fresh persistence.
- assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
// about the channel.
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
- assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
// The nodes which are a party to the channel should also ignore messages from unrelated
// parties.
nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
- assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
// At this point the channel info given by peers should still be the same.
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
// persisted and that its channel info remains the same.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
- assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
// the channel info has updated.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
- assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
}
use bitcoin::blockdata::script::Script;
use bitcoin::hash_types::{Txid, BlockHash};
+use crate::blinded_path::payment::ReceiveTlvs;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
use crate::ln::onion_utils;
use crate::onion_message;
+use crate::sign::{NodeSigner, Recipient};
use crate::prelude::*;
use core::convert::TryFrom;
use core::fmt;
use core::fmt::Debug;
+use core::ops::Deref;
use core::str::FromStr;
-use crate::io::{self, Read};
+use crate::io::{self, Cursor, Read};
use crate::io_extras::read_to_end;
use crate::events::{MessageSendEventsProvider, OnionMessageProvider};
+use crate::util::chacha20poly1305rfc::ChaChaPolyReadAdapter;
use crate::util::logger;
-use crate::util::ser::{LengthReadable, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize};
+use crate::util::ser::{LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize};
use crate::util::base32;
use crate::routing::gossip::{NodeAlias, NodeId};
}
mod fuzzy_internal_msgs {
+ use bitcoin::secp256k1::PublicKey;
+ use crate::blinded_path::payment::PaymentConstraints;
use crate::prelude::*;
use crate::ln::{PaymentPreimage, PaymentSecret};
amt_msat: u64,
outgoing_cltv_value: u32,
},
+ BlindedReceive {
+ amt_msat: u64,
+ total_msat: u64,
+ outgoing_cltv_value: u32,
+ payment_secret: PaymentSecret,
+ payment_constraints: PaymentConstraints,
+ intro_node_blinding_point: PublicKey,
+ }
}
pub(crate) enum OutboundOnionPayload {
amt_msat: u64,
outgoing_cltv_value: u32,
},
+ BlindedForward {
+ encrypted_tlvs: Vec<u8>,
+ intro_node_blinding_point: Option<PublicKey>,
+ },
+ BlindedReceive {
+ amt_msat: u64,
+ total_msat: u64,
+ outgoing_cltv_value: u32,
+ encrypted_tlvs: Vec<u8>,
+ intro_node_blinding_point: Option<PublicKey>, // Set if the introduction node of the blinded path is the final node
+ }
}
pub struct DecodedOnionErrorPacket {
(16, payment_metadata.as_ref().map(|m| WithoutLength(m)), option)
}, custom_tlvs.iter());
},
+ Self::BlindedForward { encrypted_tlvs, intro_node_blinding_point } => {
+ _encode_varint_length_prefixed_tlv!(w, {
+ (10, *encrypted_tlvs, required_vec),
+ (12, intro_node_blinding_point, option)
+ });
+ },
+ Self::BlindedReceive {
+ amt_msat, total_msat, outgoing_cltv_value, encrypted_tlvs,
+ intro_node_blinding_point,
+ } => {
+ _encode_varint_length_prefixed_tlv!(w, {
+ (2, HighZeroBytesDroppedBigSize(*amt_msat), required),
+ (4, HighZeroBytesDroppedBigSize(*outgoing_cltv_value), required),
+ (10, *encrypted_tlvs, required_vec),
+ (12, intro_node_blinding_point, option),
+ (18, HighZeroBytesDroppedBigSize(*total_msat), required)
+ });
+ },
}
Ok(())
}
}
-impl Readable for InboundOnionPayload {
- fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
- let mut amt = HighZeroBytesDroppedBigSize(0u64);
- let mut cltv_value = HighZeroBytesDroppedBigSize(0u32);
+impl<NS: Deref> ReadableArgs<&NS> for InboundOnionPayload where NS::Target: NodeSigner {
+ fn read<R: Read>(r: &mut R, node_signer: &NS) -> Result<Self, DecodeError> {
+ let mut amt = None;
+ let mut cltv_value = None;
let mut short_id: Option<u64> = None;
let mut payment_data: Option<FinalOnionHopData> = None;
+ let mut encrypted_tlvs_opt: Option<WithoutLength<Vec<u8>>> = None;
+ let mut intro_node_blinding_point = None;
let mut payment_metadata: Option<WithoutLength<Vec<u8>>> = None;
+ let mut total_msat = None;
let mut keysend_preimage: Option<PaymentPreimage> = None;
let mut custom_tlvs = Vec::new();
let tlv_len = BigSize::read(r)?;
let rd = FixedLengthReader::new(r, tlv_len.0);
decode_tlv_stream_with_custom_tlv_decode!(rd, {
- (2, amt, required),
- (4, cltv_value, required),
+ (2, amt, (option, encoding: (u64, HighZeroBytesDroppedBigSize))),
+ (4, cltv_value, (option, encoding: (u32, HighZeroBytesDroppedBigSize))),
(6, short_id, option),
(8, payment_data, option),
+ (10, encrypted_tlvs_opt, option),
+ (12, intro_node_blinding_point, option),
(16, payment_metadata, option),
+ (18, total_msat, (option, encoding: (u64, HighZeroBytesDroppedBigSize))),
// See https://github.com/lightning/blips/blob/master/blip-0003.md
(5482373484, keysend_preimage, option)
}, |msg_type: u64, msg_reader: &mut FixedLengthReader<_>| -> Result<bool, DecodeError> {
Ok(true)
});
- if amt.0 > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
- if let Some(short_channel_id) = short_id {
- if payment_data.is_some() { return Err(DecodeError::InvalidValue) }
- if payment_metadata.is_some() { return Err(DecodeError::InvalidValue); }
+ if amt.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
+
+ if let Some(blinding_point) = intro_node_blinding_point {
+ if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() {
+ return Err(DecodeError::InvalidValue)
+ }
+ let enc_tlvs = encrypted_tlvs_opt.ok_or(DecodeError::InvalidValue)?.0;
+ let enc_tlvs_ss = node_signer.ecdh(Recipient::Node, &blinding_point, None)
+ .map_err(|_| DecodeError::InvalidValue)?;
+ let rho = onion_utils::gen_rho_from_shared_secret(&enc_tlvs_ss.secret_bytes());
+ let mut s = Cursor::new(&enc_tlvs);
+ let mut reader = FixedLengthReader::new(&mut s, enc_tlvs.len() as u64);
+ match ChaChaPolyReadAdapter::read(&mut reader, rho)? {
+ ChaChaPolyReadAdapter { readable: ReceiveTlvs { payment_secret, payment_constraints }} => {
+ if total_msat.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
+ Ok(Self::BlindedReceive {
+ amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
+ total_msat: total_msat.ok_or(DecodeError::InvalidValue)?,
+ outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
+ payment_secret,
+ payment_constraints,
+ intro_node_blinding_point: blinding_point,
+ })
+ },
+ }
+ } else if let Some(short_channel_id) = short_id {
+ if payment_data.is_some() || payment_metadata.is_some() || encrypted_tlvs_opt.is_some() ||
+ total_msat.is_some()
+ { return Err(DecodeError::InvalidValue) }
Ok(Self::Forward {
short_channel_id,
- amt_to_forward: amt.0,
- outgoing_cltv_value: cltv_value.0,
+ amt_to_forward: amt.ok_or(DecodeError::InvalidValue)?,
+ outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
})
} else {
+ if encrypted_tlvs_opt.is_some() || total_msat.is_some() {
+ return Err(DecodeError::InvalidValue)
+ }
if let Some(data) = &payment_data {
if data.total_msat > MAX_VALUE_MSAT {
return Err(DecodeError::InvalidValue);
payment_data,
payment_metadata: payment_metadata.map(|w| w.0),
keysend_preimage,
- amt_msat: amt.0,
- outgoing_cltv_value: cltv_value.0,
+ amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
+ outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
custom_tlvs,
})
}
}
}
-// ReadableArgs because we need onion_utils::decode_next_hop to accommodate payment packets and
-// onion message packets.
-impl ReadableArgs<()> for InboundOnionPayload {
- fn read<R: Read>(r: &mut R, _arg: ()) -> Result<Self, DecodeError> {
- <Self as Readable>::read(r)
- }
-}
-
impl Writeable for Ping {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.ponglen.write(w)?;
use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket};
use crate::ln::msgs::SocketAddress;
use crate::routing::gossip::{NodeAlias, NodeId};
- use crate::util::ser::{Writeable, Readable, Hostname, TransactionU16LenLimited};
+ use crate::util::ser::{Writeable, Readable, ReadableArgs, Hostname, TransactionU16LenLimited};
+ use crate::util::test_utils;
use bitcoin::hashes::hex::FromHex;
use bitcoin::util::address::Address;
let target_value = hex::decode("1a02080badf00d010203040404ffffffff0608deadbeef1bad1dea").unwrap();
assert_eq!(encoded_value, target_value);
- let inbound_msg = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
- if let msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } = inbound_msg {
+ let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+ let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+ if let msgs::InboundOnionPayload::Forward {
+ short_channel_id, amt_to_forward, outgoing_cltv_value
+ } = inbound_msg {
assert_eq!(short_channel_id, 0xdeadbeef1bad1dea);
assert_eq!(amt_to_forward, 0x0badf00d01020304);
assert_eq!(outgoing_cltv_value, 0xffffffff);
let target_value = hex::decode("1002080badf00d010203040404ffffffff").unwrap();
assert_eq!(encoded_value, target_value);
- let inbound_msg = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
- if let msgs::InboundOnionPayload::Receive { payment_data: None, amt_msat, outgoing_cltv_value, .. } = inbound_msg {
+ let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+ let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+ if let msgs::InboundOnionPayload::Receive {
+ payment_data: None, amt_msat, outgoing_cltv_value, ..
+ } = inbound_msg {
assert_eq!(amt_msat, 0x0badf00d01020304);
assert_eq!(outgoing_cltv_value, 0xffffffff);
} else { panic!(); }
let target_value = hex::decode("3602080badf00d010203040404ffffffff082442424242424242424242424242424242424242424242424242424242424242421badca1f").unwrap();
assert_eq!(encoded_value, target_value);
- let inbound_msg = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+ let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+ let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
if let msgs::InboundOnionPayload::Receive {
payment_data: Some(FinalOnionHopData {
payment_secret,
outgoing_cltv_value: 0xffffffff,
};
let encoded_value = msg.encode();
- assert!(msgs::InboundOnionPayload::read(&mut Cursor::new(&encoded_value[..])).is_err());
+ let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+ assert!(msgs::InboundOnionPayload::read(&mut Cursor::new(&encoded_value[..]), &&node_signer).is_err());
let good_type_range_tlvs = vec![
((1 << 16) - 3, vec![42]),
((1 << 16) - 1, vec![42; 32]),
*custom_tlvs = good_type_range_tlvs.clone();
}
let encoded_value = msg.encode();
- let inbound_msg = Readable::read(&mut Cursor::new(&encoded_value[..])).unwrap();
+ let inbound_msg = ReadableArgs::read(&mut Cursor::new(&encoded_value[..]), &&node_signer).unwrap();
match inbound_msg {
msgs::InboundOnionPayload::Receive { custom_tlvs, .. } => assert!(custom_tlvs.is_empty()),
_ => panic!(),
let encoded_value = msg.encode();
let target_value = hex::decode("2e02080badf00d010203040404ffffffffff0000000146c6616b021234ff0000000146c6616f084242424242424242").unwrap();
assert_eq!(encoded_value, target_value);
- let inbound_msg: msgs::InboundOnionPayload = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+ let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+ let inbound_msg: msgs::InboundOnionPayload = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
if let msgs::InboundOnionPayload::Receive {
payment_data: None,
payment_metadata: None,
// payload length to be encoded over multiple bytes rather than a single u8.
let big_payload = encode_big_payload().unwrap();
let mut rd = Cursor::new(&big_payload[..]);
- <msgs::InboundOnionPayload as Readable>::read(&mut rd).unwrap();
+
+ let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+ <msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestKeysInterface>>
+ ::read(&mut rd, &&node_signer).unwrap();
}
// see above test, needs to be a separate method for use of the serialization macros.
fn encode_big_payload() -> Result<Vec<u8>, io::Error> {