use ln::chan_utils;
use ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, LocalCommitmentTransaction, PreCalculatedTxCreationKeys};
-use ln::msgs;
+use ln::msgs::UnsignedChannelAnnouncement;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::io::Error;
/// Note that if this fails or is rejected, the channel will not be publicly announced and
/// our counterparty may (though likely will not) close the channel on us for violating the
/// protocol.
- fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()>;
+ fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()>;
/// Set the remote channel basepoints and remote/local to_self_delay.
/// This is done immediately on incoming channels and as soon as the channel is accepted on outgoing channels.
Ok(secp_ctx.sign(&sighash, &self.funding_key))
}
- fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()> {
+ fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()> {
let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
Ok(secp_ctx.sign(&msghash, &self.funding_key))
}
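// Illustrative sketch (not part of the patch above; the helper name, `CS` and `funding_pubkey`
// are assumptions for this example only): the announcement signature commits to the
// double-SHA256 of the encoded message, so it can be checked against the signer's funding
// public key.
fn announcement_sig_is_valid<CS: ChannelKeys, T: secp256k1::Signing + secp256k1::Verification>(
	keys: &CS, funding_pubkey: &PublicKey, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>
) -> bool {
	let sig = match keys.sign_channel_announcement(msg, secp_ctx) {
		Ok(sig) => sig,
		Err(()) => return false,
	};
	let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
	secp_ctx.verify(&msghash, &sig, funding_pubkey).is_ok()
}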
use bitcoin::secp256k1::key::{SecretKey, PublicKey};
use bitcoin::secp256k1::{Secp256k1, Signature};
+use bitcoin::secp256k1::Error as SecpError;
use bitcoin::secp256k1;
use std::{cmp, mem};
impl TxCreationKeys {
/// Create a new TxCreationKeys from channel base points and the per-commitment point
- pub fn new<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result<TxCreationKeys, secp256k1::Error> {
+ pub fn new<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result<TxCreationKeys, SecpError> {
Ok(TxCreationKeys {
per_commitment_point: per_commitment_point.clone(),
revocation_key: derive_public_revocation_key(&secp_ctx, &per_commitment_point, &b_revocation_base)?,
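// Illustrative sketch (not part of the patch above): building the per-commitment key set for
// the local node's own commitment transaction, where "a" is the broadcaster and "b" the
// countersignatory. ChannelPublicKeys is assumed here to expose revocation_basepoint,
// delayed_payment_basepoint and htlc_basepoint fields.
fn local_commitment_tx_keys<T: secp256k1::Signing + secp256k1::Verification>(
	secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey,
	local: &ChannelPublicKeys, remote: &ChannelPublicKeys
) -> Result<TxCreationKeys, SecpError> {
	TxCreationKeys::new(secp_ctx, per_commitment_point,
		&local.delayed_payment_basepoint, &local.htlc_basepoint,
		&remote.revocation_basepoint, &remote.htlc_basepoint)
}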
use ln::features::{InitFeatures, NodeFeatures};
use routing::router::{Route, RouteHop};
use ln::msgs;
+use ln::msgs::NetAddress;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys};
use util::config::UserConfig;
+use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::{byte_utils, events};
use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
use util::chacha20::{ChaCha20, ChaChaReader};
claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
- pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
+ pub(super) pending_msg_events: Vec<MessageSendEvent>,
}
/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
- const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2;
+ const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
#[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
/// only Tor Onion addresses.
///
/// Panics if addresses is absurdly large (more than 500).
- pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<msgs::NetAddress>) {
+ pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<NetAddress>) {
let _ = self.total_consistency_lock.read().unwrap();
if addresses.len() > 500 {
}
}
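// Illustrative usage sketch (not part of the patch above): the generic bounds simply mirror
// the impl blocks below, and rgb/alias/addresses are assumed to come from the node's own
// configuration. Staying at or below 500 addresses avoids the documented panic.
fn announce_our_node<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
	channel_manager: &ChannelManager<ChanSigner, M, T, K, F, L>,
	rgb: [u8; 3], alias: [u8; 32], addresses: Vec<NetAddress>)
	where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
	      T::Target: BroadcasterInterface,
	      K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
{
	debug_assert!(addresses.len() <= 500); // broadcast_node_announcement panics above 500 addresses
	channel_manager.broadcast_node_announcement(rgb, alias, addresses);
}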
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+ fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
//TODO: This behavior should be documented. It's non-intuitive that we query
// ChannelMonitors when clearing other events.
self.process_pending_monitor_events();
}
}
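// Illustrative event-loop sketch (not part of the patch above): `handle` is assumed to be
// application code that forwards each message to the relevant peer, e.g. via a PeerManager.
// As noted in the TODO, draining also surfaces anything pending in the ChannelMonitors.
fn drain_msg_events<P: MessageSendEventsProvider, F: FnMut(MessageSendEvent)>(provider: &P, mut handle: F) {
	for event in provider.get_and_clear_pending_msg_events() {
		handle(event);
	}
}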
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+ fn get_and_clear_pending_events(&self) -> Vec<Event> {
//TODO: This behavior should be documented. It's non-intuitive that we query
// ChannelMonitors when clearing other events.
self.process_pending_monitor_events();
use util::logger::Logger;
use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48};
use util::{byte_utils, events};
+use util::events::Event;
use std::collections::{HashMap, hash_map};
use std::sync::Mutex;
use std::{hash, cmp, mem};
use std::ops::Deref;
+use std::io::Error;
/// An update generated by the underlying Channel itself which contains some new information the
/// ChannelMonitor should be made aware of.
L::Target: Logger,
C::Target: ChainWatchInterface,
{
- fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+ fn get_and_clear_pending_events(&self) -> Vec<Event> {
let mut pending_events = Vec::new();
for chan in self.monitors.lock().unwrap().values_mut() {
pending_events.append(&mut chan.get_and_clear_pending_events());
payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
pending_monitor_events: Vec<MonitorEvent>,
- pending_events: Vec<events::Event>,
+ pending_events: Vec<Event>,
// Used to track onchain events, i.e. transactions that are part of channels confirmed on-chain, on which
// we have to take action once they reach enough confirmations. The key is a block height timer, i.e. we enforce
/// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
/// returned block hash and the current chain and then reconnecting blocks to get to the
/// best chain) upon deserializing the object!
- pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
//TODO: We still write out all the serialization here manually instead of using the fancy
// serialization framework we have; we should migrate things over to it.
writer.write_all(&[SERIALIZATION_VERSION; 1])?;
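// Illustrative persistence sketch (not part of the patch above): VecWriter is a local helper,
// not a crate type, collecting the serialized monitor into a byte buffer which the application
// can then write to disk atomically. On restart, the deserialized copy must be re-synced to the
// chain tip as described in the doc comment above.
struct VecWriter(Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
	fn size_hint(&mut self, _size: usize) {}
}
fn monitor_to_bytes<ChanSigner: ChannelKeys + Writeable>(monitor: &ChannelMonitor<ChanSigner>) -> Result<Vec<u8>, Error> {
	let mut writer = VecWriter(Vec::new());
	monitor.write_for_disk(&mut writer)?;
	Ok(writer.0)
}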
/// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to
/// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
/// no internal locking in ChannelMonitors.
- pub fn get_and_clear_pending_events(&mut self) -> Vec<events::Event> {
+ pub fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
let mut ret = Vec::new();
mem::swap(&mut ret, &mut self.pending_events);
ret
},
OnchainEvent::MaturingOutput { descriptor } => {
log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
- self.pending_events.push(events::Event::SpendableOutputs {
+ self.pending_events.push(Event::SpendableOutputs {
outputs: vec![descriptor]
});
}
}
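// Illustrative consumer sketch (not part of the patch above): `provider` can be anything
// implementing events::EventsProvider, e.g. the SimpleManyChannelMonitor above, and
// SpendableOutputDescriptor is assumed to be the descriptor type from chain::keysinterface.
// Since get_and_clear_pending_events drains the queue, matured outputs are returned only once
// and the caller must persist or sweep them promptly.
fn collect_spendable_outputs<E: events::EventsProvider>(provider: &E) -> Vec<SpendableOutputDescriptor> {
	let mut spendable = Vec::new();
	for event in provider.get_and_clear_pending_events() {
		if let Event::SpendableOutputs { outputs } = event {
			spendable.extend(outputs);
		}
	}
	spendable
}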
let pending_events_len: u64 = Readable::read(reader)?;
- let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<events::Event>()));
+ let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<Event>()));
for _ in 0..pending_events_len {
if let Some(event) = MaybeReadable::read(reader)? {
pending_events.push(event);
use chain::chaininterface::{ChainError, ChainWatchInterface};
use ln::features::{ChannelFeatures, NodeFeatures};
-use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, OptionalField, MAX_VALUE_MSAT};
+use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
+use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField};
use ln::msgs;
use util::ser::{Writeable, Readable, Writer};
use util::logger::Logger;
self.network_graph.write().unwrap().update_channel(msg, Some(&self.secp_ctx))
}
- fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
+ fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
let network_graph = self.network_graph.read().unwrap();
let mut result = Vec::with_capacity(batch_amount as usize);
let mut iter = network_graph.get_channels().range(starting_point..);
result
}
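// Illustrative paging sketch (not part of the patch above): `handler` is assumed to be the
// RoutingMessageHandler whose methods appear in this impl block. Each batch resumes from the
// short_channel_id just past the last announcement returned.
fn collect_channel_announcements<R: RoutingMessageHandler>(handler: &R) -> Vec<ChannelAnnouncement> {
	let mut announcements = Vec::new();
	let mut starting_point = 0u64;
	loop {
		let batch = handler.get_next_channel_announcements(starting_point, 64);
		if batch.is_empty() { break; }
		starting_point = batch.last().unwrap().0.contents.short_channel_id + 1;
		for (ann, _update_a, _update_b) in batch {
			announcements.push(ann);
		}
	}
	announcements
}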
- fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
+ fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement> {
let network_graph = self.network_graph.read().unwrap();
let mut result = Vec::with_capacity(batch_amount as usize);
let mut iter = if let Some(pubkey) = starting_point {