use std::{cmp, fmt};
use std::io::Read;
-use util::events;
+use util::events::MessageSendEventsProvider;
use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt};
use ln::channelmanager::{PaymentPreimage, PaymentHash, PaymentSecret};
use ln::features::InitFeatures;
use bitcoin::secp256k1::key::PublicKey;
///
/// Messages MAY be called in parallel when they originate from different their_node_ids; however,
/// they MUST NOT be called in parallel when the two calls have the same their_node_id.
-pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Sync {
+pub trait ChannelMessageHandler : MessageSendEventsProvider + Send + Sync {
//Channel init:
/// Handle an incoming open_channel message from the given peer.
fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &OpenChannel);
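As a hedged illustration (not part of this diff) of what the direct `MessageSendEventsProvider` import looks like from an implementor's side: the hypothetical `StubHandler` below only satisfies the supertrait, while a real `ChannelMessageHandler` implementor would additionally provide all of the channel message callbacks such as `handle_open_channel`. Paths assume an external consumer of the `lightning` crate.

```rust
use lightning::util::events::{MessageSendEvent, MessageSendEventsProvider};

// Hypothetical stub, for illustration only: every ChannelMessageHandler must
// also be a MessageSendEventsProvider, i.e. expose a drainable queue of
// outbound messages.
struct StubHandler;

impl MessageSendEventsProvider for StubHandler {
    fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
        // This stub never queues outbound messages.
        Vec::new()
    }
}
```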
/// For `gossip_queries` messages there are potential DoS vectors when handling
/// inbound queries. Implementors using an on-disk network graph should be aware of
/// repeated disk I/O for queries accessing different parts of the network graph.
-pub trait RoutingMessageHandler : Send + Sync + events::MessageSendEventsProvider {
+pub trait RoutingMessageHandler : Send + Sync + MessageSendEventsProvider {
/// Handle an incoming node_announcement message, returning true if it should be forwarded on,
/// and false or an Err otherwise.
fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result<bool, LightningError>;
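For the forwarding contract documented above, a small sketch of how a caller might interpret the `Result<bool, LightningError>`; `should_relay` and its generic parameter are hypothetical names, not part of the diff.

```rust
use lightning::ln::msgs::{NodeAnnouncement, RoutingMessageHandler};

// Hypothetical helper: relay a node_announcement only when the handler
// returns Ok(true); Ok(false) and Err(_) both mean "do not forward".
fn should_relay<R: RoutingMessageHandler>(handler: &R, msg: &NodeAnnouncement) -> bool {
    match handler.handle_node_announcement(msg) {
        Ok(forward) => forward,
        Err(_err) => false, // invalid or unhandled announcement
    }
}
```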
use ln::msgs;
use util::ser::{Writeable, Readable, Writer};
use util::logger::Logger;
-use util::events;
+use util::events::{MessageSendEvent, MessageSendEventsProvider};
use std::{cmp, fmt};
use std::sync::{RwLock, RwLockReadGuard, Mutex};
use std::sync::atomic::AtomicUsize;
use std::ops::Deref;
pub network_graph: RwLock<NetworkGraph>,
chain_access: Option<C>,
full_syncs_requested: AtomicUsize,
- pending_events: Mutex<Vec<events::MessageSendEvent>>,
+ pending_events: Mutex<Vec<MessageSendEvent>>,
logger: L,
}
let number_of_blocks = 0xffffffff;
log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks);
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::MessageSendEvent::SendChannelRangeQuery {
+ pending_events.push(MessageSendEvent::SendChannelRangeQuery {
node_id: their_node_id.clone(),
msg: QueryChannelRange {
chain_hash: self.network_graph.read().unwrap().genesis_hash,
log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len());
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::MessageSendEvent::SendShortIdsQuery {
+ pending_events.push(MessageSendEvent::SendShortIdsQuery {
node_id: their_node_id.clone(),
msg: QueryShortChannelIds {
chain_hash: msg.chain_hash,
}
}
-impl<C: Deref, L: Deref> events::MessageSendEventsProvider for NetGraphMsgHandler<C, L>
+impl<C: Deref, L: Deref> MessageSendEventsProvider for NetGraphMsgHandler<C, L>
where
C::Target: chain::Access,
L::Target: Logger,
{
- fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+ fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let mut ret = Vec::new();
let mut pending_events = self.pending_events.lock().unwrap();
        std::mem::swap(&mut ret, &mut pending_events);
        ret
    }
}
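To show how the swap-based getter is consumed with the new direct imports, here is a hedged caller-side sketch; `drain_gossip_queries` is a hypothetical helper standing in for whatever loop (e.g. a peer handler) drains a `MessageSendEventsProvider` such as `NetGraphMsgHandler`.

```rust
use lightning::util::events::{MessageSendEvent, MessageSendEventsProvider};

// Hypothetical drain loop: take ownership of all queued messages at once
// (the provider's queue is left empty by the swap) and dispatch them.
fn drain_gossip_queries<P: MessageSendEventsProvider>(provider: &P) {
    for event in provider.get_and_clear_pending_msg_events() {
        match event {
            MessageSendEvent::SendChannelRangeQuery { node_id, msg } => {
                // Send the query_channel_range message to the peer `node_id`.
                let _ = (node_id, msg);
            }
            MessageSendEvent::SendShortIdsQuery { node_id, msg } => {
                // Send the query_short_channel_ids follow-up to the peer.
                let _ = (node_id, msg);
            }
            _ => {} // other variants (channel messages, announcements, ...)
        }
    }
}
```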