// Note this is only exposed in cfg(test):
pub(super) struct ChannelHolder<Signer: Sign> {
pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
- /// Map from payment hash to the payment data and any HTLCs which are to us and can be
- /// failed/claimed by the user.
- ///
- /// Note that while this is held in the same mutex as the channels themselves, no consistency
- /// guarantees are made about the channels given here actually existing anymore by the time you
- /// go to read them!
- claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<MessageSendEvent>,
// |
// |__`forward_htlcs`
// |
-// |__`channel_state`
-// | |
-// | |__`id_to_peer`
+// |__`pending_inbound_payments`
// | |
-// | |__`short_to_chan_info`
+// | |__`claimable_htlcs`
// | |
-// | |__`per_peer_state`
+// | |__`pending_outbound_payments`
// | |
-// | |__`outbound_scid_aliases`
-// | |
-// | |__`pending_inbound_payments`
+// | |__`channel_state`
+// | |
+// | |__`id_to_peer`
// | |
-// | |__`pending_outbound_payments`
+// | |__`short_to_chan_info`
+// | |
+// | |__`per_peer_state`
+// | |
+// | |__`outbound_scid_aliases`
// | |
// | |__`best_block`
// | |
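The tree above encodes an acquisition order, not just containment: any path that needs both a parent and a child lock must take the parent first, and every path must agree on that order or two threads can deadlock. A minimal standalone sketch of the idea, using stand-in value types rather than LDK's:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Stand-ins for two of the fields named in the tree above; only the
// lock discipline is the point here.
struct Holder {
    pending_inbound_payments: Mutex<HashMap<[u8; 32], u64>>,
    claimable_htlcs: Mutex<HashMap<[u8; 32], Vec<u64>>>,
}

impl Holder {
    fn consistent_order(&self) {
        // Parent before child, as documented: `pending_inbound_payments`
        // is always taken before `claimable_htlcs`, never the reverse.
        let _inbound = self.pending_inbound_payments.lock().unwrap();
        let _claimable = self.claimable_htlcs.lock().unwrap();
        // Guards drop at end of scope, releasing in reverse order.
    }
}
```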
#[cfg(not(test))]
forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+ /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+ /// failed/claimed by the user.
+ ///
+	/// Note that no consistency guarantees are made about the channels given here actually
+ /// existing anymore by the time you go to read them!
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
+
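With the map behind its own short-lived `Mutex`, callers pull entries out under that lock and do the real work afterwards, holding no channel lock, which is why the doc comment above can promise nothing about the channels still existing. A sketch of that remove-then-process shape, with stand-in types and a hypothetical helper name:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Stand-ins for LDK's PaymentHash / ClaimableHTLC.
type PaymentHash = [u8; 32];
struct ClaimableHTLC { value: u64 }

// Hypothetical helper mirroring the shape of `fail_htlc_backwards`
// below: the lock guard lives only for the duration of `remove`.
fn take_payment(
    claimable_htlcs: &Mutex<HashMap<PaymentHash, Vec<ClaimableHTLC>>>,
    payment_hash: &PaymentHash,
) -> u64 {
    let removed = claimable_htlcs.lock().unwrap().remove(payment_hash);
    // No lock held here; the referenced channels may already be gone.
    removed.map_or(0, |htlcs| htlcs.iter().map(|h| h.value).sum())
}
```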
/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
/// and some closed channels which reached a usable state prior to being closed. This is used
/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
}
}
-macro_rules! handle_chan_restoration_locked {
- ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
- $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
- $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
- let mut htlc_forwards = None;
-
- let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
- let chanmon_update_is_none = chanmon_update.is_none();
- let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
- let res = loop {
- let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
- if !forwards.is_empty() {
- htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()),
- $channel_entry.get().get_funding_txo().unwrap(), forwards));
- }
-
- if chanmon_update.is_some() {
- // On reconnect, we, by definition, only resend a channel_ready if there have been
- // no commitment updates, so the only channel monitor update which could also be
- // associated with a channel_ready would be the funding_created/funding_signed
- // monitor update. That monitor update failing implies that we won't send
- // channel_ready until it's been updated, so we can't have a channel_ready and a
- // monitor update here (so we don't bother to handle it correctly below).
- assert!($channel_ready.is_none());
- // A channel monitor update makes no sense without either a channel_ready or a
- // commitment update to process after it. Since we can't have a channel_ready, we
- // only bother to handle the monitor-update + commitment_update case below.
- assert!($commitment_update.is_some());
- }
-
- if let Some(msg) = $channel_ready {
- // Similar to the above, this implies that we're letting the channel_ready fly
- // before it should be allowed to.
- assert!(chanmon_update.is_none());
- send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
- }
- if let Some(msg) = $announcement_sigs {
- $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: counterparty_node_id,
- msg,
- });
- }
-
- emit_channel_ready_event!($self, $channel_entry.get_mut());
-
- let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
- if let Some(monitor_update) = chanmon_update {
- // We only ever broadcast a funding transaction in response to a funding_signed
- // message and the resulting monitor update. Thus, on channel_reestablish
- // message handling we can't have a funding transaction to broadcast. When
- // processing a monitor update finishing resulting in a funding broadcast, we
- // cannot have a second monitor update, thus this case would indicate a bug.
- assert!(funding_broadcastable.is_none());
- // Given we were just reconnected or finished updating a channel monitor, the
- // only case where we can get a new ChannelMonitorUpdate would be if we also
- // have some commitment updates to send as well.
- assert!($commitment_update.is_some());
- match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- // channel_reestablish doesn't guarantee the order it returns is sensical
- // for the messages it returns, but if we're setting what messages to
- // re-transmit on monitor update success, we need to make sure it is sane.
- let mut order = $order;
- if $raa.is_none() {
- order = RAACommitmentOrder::CommitmentFirst;
- }
- break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
- }
- }
- }
-
- macro_rules! handle_cs { () => {
- if let Some(update) = $commitment_update {
- $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id,
- updates: update,
- });
- }
- } }
- macro_rules! handle_raa { () => {
- if let Some(revoke_and_ack) = $raa {
- $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: counterparty_node_id,
- msg: revoke_and_ack,
- });
- }
- } }
- match $order {
- RAACommitmentOrder::CommitmentFirst => {
- handle_cs!();
- handle_raa!();
- },
- RAACommitmentOrder::RevokeAndACKFirst => {
- handle_raa!();
- handle_cs!();
- },
- }
- if let Some(tx) = funding_broadcastable {
- log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
- $self.tx_broadcaster.broadcast_transaction(&tx);
- }
- break Ok(());
- };
-
- if chanmon_update_is_none {
- // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
- // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
- // should *never* end up calling back to `chain_monitor.update_channel()`.
- assert!(res.is_ok());
- }
-
- (htlc_forwards, res, counterparty_node_id)
- } }
-}
-
-macro_rules! post_handle_chan_restoration {
- ($self: ident, $locked_res: expr) => { {
- let (htlc_forwards, res, counterparty_node_id) = $locked_res;
-
- let _ = handle_error!($self, res, counterparty_node_id);
-
- if let Some(forwards) = htlc_forwards {
- $self.forward_htlcs(&mut [forwards][..]);
- }
- } }
-}
-
impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
channel_state: Mutex::new(ChannelHolder{
by_id: HashMap::new(),
- claimable_htlcs: HashMap::new(),
pending_msg_events: Vec::new(),
}),
outbound_scid_aliases: Mutex::new(HashSet::new()),
pending_inbound_payments: Mutex::new(HashMap::new()),
pending_outbound_payments: Mutex::new(HashMap::new()),
forward_htlcs: Mutex::new(HashMap::new()),
+ claimable_htlcs: Mutex::new(HashMap::new()),
id_to_peer: Mutex::new(HashMap::new()),
short_to_chan_info: FairRwLock::new(HashMap::new()),
if *counterparty_node_id != chan_entry.get().get_counterparty_node_id() {
return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
}
- let per_peer_state = self.per_peer_state.read().unwrap();
- let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
- Some(peer_state) => {
- let peer_state = peer_state.lock().unwrap();
- let their_features = &peer_state.latest_features;
- chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
- },
- None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ let (shutdown_msg, monitor_update, htlcs) = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ match per_peer_state.get(&counterparty_node_id) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ }
};
failed_htlcs = htlcs;
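The rewritten block above is the standard scoping idiom: binding the result of a block makes the `per_peer_state` read guard drop as soon as the shutdown message is built, before anything else is locked. The same idiom in isolation, with an illustrative payload type and function name:

```rust
use std::sync::RwLock;

fn first_feature_byte(per_peer_state: &RwLock<Vec<u8>>, idx: usize) -> Option<u8> {
    let byte = {
        let guard = per_peer_state.read().unwrap();
        guard.get(idx).copied()
    }; // read guard dropped here, before any later lock is taken
    byte
}
```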
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for (short_chan_id, mut pending_forwards) in forward_htlcs {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
if short_chan_id != 0 {
macro_rules! forwarding_channel_not_found {
() => {
continue;
}
};
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(forward_chan_id) {
hash_map::Entry::Vacant(_) => {
forwarding_channel_not_found!();
payment_secret: $payment_data.payment_secret,
}
};
- let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+ let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+ let (_, htlcs) = claimable_htlcs.entry(payment_hash)
.or_insert_with(|| (purpose(), Vec::new()));
if htlcs.len() == 1 {
if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
check_total_value!(payment_data, payment_preimage);
},
OnionPayload::Spontaneous(preimage) => {
- match channel_state.claimable_htlcs.entry(payment_hash) {
+ match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
e.insert((purpose.clone(), vec![claimable_htlc]));
true
});
+ }
- channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- if htlcs.is_empty() {
- // This should be unreachable
- debug_assert!(false);
+ self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ if htlcs.is_empty() {
+ // This should be unreachable
+ debug_assert!(false);
+ return false;
+ }
+ if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
+				// Check if we've received all the parts we need for an MPP (the value of the parts adds up to total_msat).
+ // In this case we're not going to handle any timeouts of the parts here.
+ if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+ return true;
+ } else if htlcs.into_iter().any(|htlc| {
+ htlc.timer_ticks += 1;
+ return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+ }) {
+ timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
return false;
}
- if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
- // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
- // In this case we're not going to handle any timeouts of the parts here.
- if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
- return true;
- } else if htlcs.into_iter().any(|htlc| {
- htlc.timer_ticks += 1;
- return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
- }) {
- timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
- return false;
- }
- }
- true
- });
- }
+ }
+ true
+ });
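The `retain` above keeps an MPP entry once the received parts sum to the sender's advertised total, and otherwise ticks each part's timer toward `MPP_TIMEOUT_TICKS`. The completeness test in isolation, with an illustrative part type (it assumes at least one part is present, as the code above does):

```rust
struct Part { value: u64, total_msat: u64 }

fn mpp_complete(parts: &[Part]) -> bool {
    // e.g. three 1_000 msat parts toward a 3_000 msat total: complete.
    parts[0].total_msat == parts.iter().map(|p| p.value).sum::<u64>()
}
```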
for htlc_source in timed_out_mpp_htlcs.drain(..) {
let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let removed_source = {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.claimable_htlcs.remove(payment_hash)
- };
+ let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
if let Some((_, mut sources)) = removed_source {
for htlc in sources.drain(..) {
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
+ let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
if let Some((payment_purpose, mut sources)) = removed_source {
assert!(!sources.is_empty());
self.our_network_pubkey.clone()
}
+ /// Handles a channel reentering a functional state, either due to reconnect or a monitor
+ /// update completion.
+ fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
+ channel: &mut Channel<<K::Target as KeysInterface>::Signer>, raa: Option<msgs::RevokeAndACK>,
+ commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
+ pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+ channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
+ -> Option<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> {
+ let mut htlc_forwards = None;
+
+ let counterparty_node_id = channel.get_counterparty_node_id();
+ if !pending_forwards.is_empty() {
+ htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
+ channel.get_funding_txo().unwrap(), pending_forwards));
+ }
+
+ if let Some(msg) = channel_ready {
+ send_channel_ready!(self, pending_msg_events, channel, msg);
+ }
+ if let Some(msg) = announcement_sigs {
+ pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: counterparty_node_id,
+ msg,
+ });
+ }
+
+ emit_channel_ready_event!(self, channel);
+
+ macro_rules! handle_cs { () => {
+ if let Some(update) = commitment_update {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: counterparty_node_id,
+ updates: update,
+ });
+ }
+ } }
+ macro_rules! handle_raa { () => {
+ if let Some(revoke_and_ack) = raa {
+ pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: counterparty_node_id,
+ msg: revoke_and_ack,
+ });
+ }
+ } }
+ match order {
+ RAACommitmentOrder::CommitmentFirst => {
+ handle_cs!();
+ handle_raa!();
+ },
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ handle_raa!();
+ handle_cs!();
+ },
+ }
+
+ if let Some(tx) = funding_broadcastable {
+ log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+ self.tx_broadcaster.broadcast_transaction(&tx);
+ }
+
+ htlc_forwards
+ }
+
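The `order` match in `handle_channel_resumption` preserves the relative order in which a revoke_and_ack and a commitment update were originally sent, since the counterparty must reprocess them in that same order on reconnect. A compact sketch of the dispatch shape, with strings standing in for the message types:

```rust
enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

fn enqueue_in_order(
    order: RAACommitmentOrder,
    queue: &mut Vec<&'static str>,
    raa: Option<&'static str>,
    cs: Option<&'static str>,
) {
    // Push whichever message the counterparty must process first.
    let (first, second) = match order {
        RAACommitmentOrder::CommitmentFirst => (cs, raa),
        RAACommitmentOrder::RevokeAndACKFirst => (raa, cs),
    };
    queue.extend(first.into_iter().chain(second));
}
```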
fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let chan_restoration_res;
+ let htlc_forwards;
let (mut pending_failures, finalized_claims, counterparty_node_id) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
})
} else { None }
} else { None };
- chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+ htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
(updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
};
- post_handle_chan_restoration!(self, chan_restoration_res);
+ if let Some(forwards) = htlc_forwards {
+ self.forward_htlcs(&mut [forwards][..]);
+ }
self.finalize_claims(finalized_claims);
for failure in pending_failures.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), chan);
+ try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
- let chan_restoration_res;
- let (htlcs_failed_forward, need_lnd_workaround) = {
+ let htlc_forwards;
+ let need_lnd_workaround = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
}
}
let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
- chan_restoration_res = handle_chan_restoration_locked!(
- self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
- responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ htlc_forwards = self.handle_channel_resumption(
+ &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
+ Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
- (responses.holding_cell_failed_htlcs, need_lnd_workaround)
+ need_lnd_workaround
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- post_handle_chan_restoration!(self, chan_restoration_res);
- self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
+
+ if let Some(forwards) = htlc_forwards {
+ self.forward_htlcs(&mut [forwards][..]);
+ }
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
}
true
});
+ }
- if let Some(height) = height_opt {
- channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- htlcs.retain(|htlc| {
- // If height is approaching the number of blocks we think it takes us to get
- // our commitment transaction confirmed before the HTLC expires, plus the
- // number of blocks we generally consider it to take to do a commitment update,
- // just give up on it and fail the HTLC.
- if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
-
- timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
- failure_code: 0x4000 | 15,
- data: htlc_msat_height_data
- }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
- false
- } else { true }
- });
- !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ if let Some(height) = height_opt {
+ self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ htlcs.retain(|htlc| {
+ // If height is approaching the number of blocks we think it takes us to get
+ // our commitment transaction confirmed before the HTLC expires, plus the
+ // number of blocks we generally consider it to take to do a commitment update,
+ // just give up on it and fail the HTLC.
+ if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
+ timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+ failure_code: 0x4000 | 15,
+ data: htlc_msat_height_data
+ }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+ false
+ } else { true }
});
- }
+ !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ });
}
self.handle_init_event_channel_failures(failed_channels);
}
}
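The height check above gives up on an incoming HTLC once the chain tip gets within a safety buffer of its CLTV expiry, leaving enough blocks to confirm a commitment transaction and fail the HTLC backwards. A worked sketch with an illustrative buffer (LDK's real `HTLC_FAIL_BACK_BUFFER` is derived from other constants):

```rust
const HTLC_FAIL_BACK_BUFFER: u32 = 6; // illustrative value only

fn should_fail_back(best_height: u32, cltv_expiry: u32) -> bool {
    // e.g. expiry 800_000 with a 6-block buffer: fail back from 799_994 on.
    best_height >= cltv_expiry - HTLC_FAIL_BACK_BUFFER
}
```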
- let channel_state = self.channel_state.lock().unwrap();
+ let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
+ let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+ let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
- (channel_state.claimable_htlcs.len() as u64).write(writer)?;
- for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
+ (claimable_htlcs.len() as u64).write(writer)?;
+ for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
payment_hash.write(writer)?;
(previous_hops.len() as u64).write(writer)?;
for htlc in previous_hops.iter() {
peer_state.latest_features.write(writer)?;
}
- let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
- let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
let events = self.pending_events.lock().unwrap();
(events.len() as u64).write(writer)?;
for event in events.iter() {
channel_state: Mutex::new(ChannelHolder {
by_id,
- claimable_htlcs,
pending_msg_events: Vec::new(),
}),
inbound_payment_key: expanded_inbound_key,
pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
+ claimable_htlcs: Mutex::new(claimable_htlcs),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
id_to_peer: Mutex::new(id_to_peer),
short_to_chan_info: FairRwLock::new(short_to_chan_info),
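On the write path above, the now-independent maps are locked in the documented order and each serialized as a count followed by its entries; on read, plain maps are rebuilt and rewrapped in fresh `Mutex`es. A minimal sketch of that length-prefixed shape, with `io::Write` standing in for LDK's `Writer` trait and a simplified value type:

```rust
use std::collections::HashMap;
use std::io::{self, Write};

fn write_claimable<W: Write>(w: &mut W, map: &HashMap<[u8; 32], u64>) -> io::Result<()> {
    // Count first, so the reader knows how many records follow.
    w.write_all(&(map.len() as u64).to_be_bytes())?;
    for (payment_hash, amt_msat) in map.iter() {
        w.write_all(payment_hash)?;
        w.write_all(&amt_msat.to_be_bytes())?;
    }
    Ok(())
}
```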