pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
}
+/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
+/// the latest Init features we heard from the peer.
+struct PeerState {
+ latest_features: InitFeatures,
+}
+
#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
channel_state: Mutex<ChannelHolder<ChanSigner>>,
our_network_key: SecretKey,
+ /// The bulk of our storage will eventually be here (channels and message queues and the like).
+ /// If we are connected to a peer, we always have at least an entry here, even if no channels
+ /// are currently open with that peer.
+ /// Because adding or removing an entry is rare, we usually take an outer read lock and then
+ /// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
+ /// new channel.
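+ ///
+ /// For example, a typical read of a peer's state might look like this (a sketch;
+ /// `their_node_id` stands in for the peer's `PublicKey`):
+ ///
+ /// ```ignore
+ /// let per_peer_state = self.per_peer_state.read().unwrap();
+ /// if let Some(peer_mutex) = per_peer_state.get(&their_node_id) {
+ ///     let peer_state = peer_mutex.lock().unwrap();
+ ///     // ... use peer_state.latest_features while holding only the outer read lock ...
+ /// }
+ /// ```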
+ per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+
pending_events: Mutex<Vec<events::Event>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// Essentially just when we're serializing ourselves out.
}),
our_network_key: keys_manager.get_node_secret(),
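+ // No peers yet: entries are created in peer_connected and dropped in peer_disconnected
+ // once no channels with that peer remain.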
+ per_peer_state: RwLock::new(HashMap::new()),
+
pending_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
let _ = self.total_consistency_lock.read().unwrap();
let mut failed_channels = Vec::new();
let mut failed_payments = Vec::new();
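+ // Assume no channels with this peer remain open until we see one survive the loop below.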
+ let mut no_channels_remain = true;
{
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
short_to_id.remove(&short_id);
}
return false;
+ } else {
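+ // This channel with the disconnecting peer stays open, so keep its per_peer_state entry.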
+ no_channels_remain = false;
}
}
true
}
});
}
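+ // If the disconnecting peer has no channels left with us, drop its per_peer_state entry;
+ // a fresh entry will be created in peer_connected if it reconnects.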
+ if no_channels_remain {
+ self.per_peer_state.write().unwrap().remove(their_node_id);
+ }
+
for failure in failed_channels.drain(..) {
self.finish_force_close_channel(failure);
}
}
}
- fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &msgs::Init) {
+ fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id));
let _ = self.total_consistency_lock.read().unwrap();
+
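+ // Record (or refresh) the features this peer advertised in its Init message before we
+ // queue any channel_reestablish messages for it.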
+ {
+ let mut peer_state_lock = self.per_peer_state.write().unwrap();
+ match peer_state_lock.entry(their_node_id.clone()) {
+ hash_map::Entry::Vacant(e) => {
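+ // First connection from this peer (or first since its entry was removed): create its state.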
+ e.insert(Mutex::new(PeerState {
+ latest_features: init_msg.features.clone(),
+ }));
+ },
+ hash_map::Entry::Occupied(e) => {
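+ // Reconnection: keep the existing entry but refresh the stored features.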
+ e.get().lock().unwrap().latest_features = init_msg.features.clone();
+ },
+ }
+ }
+
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
}
}
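+ // Serialize the per-peer map as a count followed by each peer's (pubkey, latest_features).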
+ let per_peer_state = self.per_peer_state.write().unwrap();
+ (per_peer_state.len() as u64).write(writer)?;
+ for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
+ peer_pubkey.write(writer)?;
+ let peer_state = peer_state_mutex.lock().unwrap();
+ peer_state.latest_features.write(writer)?;
+ }
+
Ok(())
}
}
claimable_htlcs.insert(payment_hash, previous_hops);
}
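+ // Read the per-peer map back in the same form it was written: a count, then
+ // (pubkey, latest_features) pairs.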
+ let peer_count: u64 = Readable::read(reader)?;
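+ // Cap the preallocation so a corrupt or malicious count can't force a huge allocation.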
+ let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, 128));
+ for _ in 0..peer_count {
+ let peer_pubkey = Readable::read(reader)?;
+ let peer_state = PeerState {
+ latest_features: Readable::read(reader)?,
+ };
+ per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
+ }
+
let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: args.fee_estimator,
}),
our_network_key: args.keys_manager.get_node_secret(),
+ per_peer_state: RwLock::new(per_peer_state),
+
pending_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
keys_manager: args.keys_manager,