edition = "2018"
[dependencies]
-base64 = "0.13.0"
-bech32 = "0.8"
-bitcoin = "0.28.1"
-bitcoin-bech32 = "0.12"
-lightning = { version = "0.0.110" }
-lightning-block-sync = { version = "0.0.110", features=["rest-client"] }
-lightning-net-tokio = { version = "0.0.110" }
-chrono = "0.4"
-hex = "0.3"
-rand = "0.4"
+bitcoin = "0.29"
+lightning = { version = "0.0.111" }
+lightning-block-sync = { version = "0.0.111", features=["rest-client"] }
+lightning-net-tokio = { version = "0.0.111" }
tokio = { version = "1.14.1", features = ["full"] }
tokio-postgres = { version="0.7.5" }
+futures = "0.3"
[profile.release]
opt-level = 3
+use std::convert::TryInto;
use std::env;
use std::net::SocketAddr;
+use std::io::Cursor;
use bitcoin::secp256k1::PublicKey;
+use lightning::ln::msgs::ChannelAnnouncement;
+use lightning::util::ser::Readable;
use lightning_block_sync::http::HttpEndpoint;
use tokio_postgres::Config;
use crate::hex_utils;
-pub(crate) const SCHEMA_VERSION: i32 = 1;
+use futures::stream::{FuturesUnordered, StreamExt};
+
+pub(crate) const SCHEMA_VERSION: i32 = 8;
pub(crate) const SNAPSHOT_CALCULATION_INTERVAL: u32 = 3600 * 24; // every 24 hours, in seconds
pub(crate) const DOWNLOAD_NEW_GOSSIP: bool = true;
pub(crate) fn db_announcement_table_creation_query() -> &'static str {
"CREATE TABLE IF NOT EXISTS channel_announcements (
id SERIAL PRIMARY KEY,
- short_channel_id character varying(255) NOT NULL UNIQUE,
- block_height integer,
- chain_hash character varying(255),
+ short_channel_id bigint NOT NULL UNIQUE,
announcement_signed BYTEA,
seen timestamp NOT NULL DEFAULT NOW()
)"
+/// DDL for the `channel_updates` table as created from scratch at schema v8:
+/// one row per received `channel_update` gossip message, with the update's
+/// fields denormalized alongside the raw signed blob. Duplicate inserts are
+/// suppressed via the unique `channel_updates_key` index created in
+/// `db_index_creation_query()` together with `ON CONFLICT DO NOTHING`.
pub(crate) fn db_channel_update_table_creation_query() -> &'static str {
"CREATE TABLE IF NOT EXISTS channel_updates (
id SERIAL PRIMARY KEY,
- composite_index character varying(255) UNIQUE,
- chain_hash character varying(255),
- short_channel_id character varying(255),
- timestamp bigint,
- channel_flags integer,
- direction integer,
- disable boolean,
- cltv_expiry_delta integer,
- htlc_minimum_msat bigint,
- fee_base_msat integer,
- fee_proportional_millionths integer,
- htlc_maximum_msat bigint,
- blob_signed BYTEA,
+ short_channel_id bigint NOT NULL,
+ timestamp bigint NOT NULL,
+ channel_flags smallint NOT NULL,
+ direction boolean NOT NULL,
+ disable boolean NOT NULL,
+ cltv_expiry_delta integer NOT NULL,
+ htlc_minimum_msat bigint NOT NULL,
+ fee_base_msat integer NOT NULL,
+ fee_proportional_millionths integer NOT NULL,
+ htlc_maximum_msat bigint NOT NULL,
+ blob_signed BYTEA NOT NULL,
seen timestamp NOT NULL DEFAULT NOW()
)"
}
+/// Index-creation DDL run at startup; every statement is idempotent via
+/// `IF NOT EXISTS`. The `INCLUDE`d columns appear to make these covering
+/// indexes for the snapshot/delta queries (which order by
+/// short_channel_id/direction/seen) -- NOTE(review): confirm against the
+/// queries in the snapshot calculation path. The unique `channel_updates_key`
+/// index is what the insert path's `ON CONFLICT DO NOTHING` relies on.
pub(crate) fn db_index_creation_query() -> &'static str {
"
- CREATE INDEX IF NOT EXISTS channels_seen ON channel_announcements(seen);
- CREATE INDEX IF NOT EXISTS channel_updates_scid ON channel_updates(short_channel_id);
- CREATE INDEX IF NOT EXISTS channel_updates_direction ON channel_updates(direction);
- CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen);
+ CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen, short_channel_id, direction) INCLUDE (id, blob_signed);
+ CREATE INDEX IF NOT EXISTS channel_updates_scid_seen ON channel_updates(short_channel_id, seen) INCLUDE (blob_signed);
+ CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
+ CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen ON channel_updates(short_channel_id ASC, direction ASC, seen DESC) INCLUDE (id, blob_signed);
+ CREATE UNIQUE INDEX IF NOT EXISTS channel_updates_key ON channel_updates (short_channel_id, direction, timestamp);
"
}
+/// Upgrades an existing database in place from `schema` (the `db_schema`
+/// value read from the `config` table) to the current [`SCHEMA_VERSION`].
+///
+/// Each migration step runs in its own transaction and bumps
+/// `config.db_schema` before committing, so an interrupted upgrade resumes
+/// at the correct step on the next startup. The step guards test the
+/// *original* `schema` argument, so a v1 database flows through every step
+/// in order within a single call.
+///
+/// Panics on any database error and on an out-of-range `schema` -- this
+/// runs once at startup, where failing fast is intended.
+pub(crate) async fn upgrade_db(schema: i32, client: &mut tokio_postgres::Client) {
+	// Reject unknown schemas before touching any tables. The bound must be
+	// `<= 0`, not `<= 1`: schema 1 is valid (the oldest upgradable version,
+	// handled by every step below), so rejecting it would panic on databases
+	// we just successfully migrated.
+	if schema <= 0 || schema > SCHEMA_VERSION {
+		panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
+	}
+	if schema == 1 {
+		// v1 -> v2: the chain hash is constant per deployment; drop the columns.
+		let tx = client.transaction().await.unwrap();
+		tx.execute("ALTER TABLE channel_updates DROP COLUMN chain_hash", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_announcements DROP COLUMN chain_hash", &[]).await.unwrap();
+		tx.execute("UPDATE config SET db_schema = 2 WHERE id = 1", &[]).await.unwrap();
+		tx.commit().await.unwrap();
+	}
+	if schema == 1 || schema == 2 {
+		// v2 -> v3: retype channel_updates.short_channel_id (varchar -> bigint)
+		// and direction (integer -> boolean), backfilling both from the legacy
+		// "scid_hex:timestamp:direction" composite_index in 50k-row batches.
+		let tx = client.transaction().await.unwrap();
+		tx.execute("ALTER TABLE channel_updates DROP COLUMN short_channel_id", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ADD COLUMN short_channel_id bigint DEFAULT null", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates DROP COLUMN direction", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ADD COLUMN direction boolean DEFAULT null", &[]).await.unwrap();
+		loop {
+			let rows = tx.query("SELECT id, composite_index FROM channel_updates WHERE short_channel_id IS NULL LIMIT 50000", &[]).await.unwrap();
+			if rows.is_empty() { break; }
+			// Issue the per-row UPDATEs concurrently on the shared transaction.
+			let mut updates = FuturesUnordered::new();
+			for row in rows {
+				let id: i32 = row.get("id");
+				let index: String = row.get("composite_index");
+				let tx_ref = &tx;
+				updates.push(async move {
+					let mut index_iter = index.split(":");
+					let scid_hex = index_iter.next().unwrap();
+					index_iter.next().unwrap(); // skip the timestamp component
+					let direction_str = index_iter.next().unwrap();
+					assert!(direction_str == "1" || direction_str == "0");
+					let direction = direction_str == "1";
+					let scid_be_bytes = hex_utils::to_vec(scid_hex).unwrap();
+					let scid = i64::from_be_bytes(scid_be_bytes.try_into().unwrap());
+					// Postgres bigint is signed; scids stay positive until the
+					// block-height component overflows the sign bit (~150 years).
+					assert!(scid > 0);
+					tx_ref.execute("UPDATE channel_updates SET short_channel_id = $1, direction = $2 WHERE id = $3", &[&scid, &direction, &id]).await.unwrap();
+				});
+			}
+			while updates.next().await.is_some() { }
+		}
+		tx.execute("ALTER TABLE channel_updates ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER direction DROP DEFAULT", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER direction SET NOT NULL", &[]).await.unwrap();
+		tx.execute("UPDATE config SET db_schema = 3 WHERE id = 1", &[]).await.unwrap();
+		tx.commit().await.unwrap();
+	}
+	if schema >= 1 && schema <= 3 {
+		// v3 -> v4: retype channel_announcements.short_channel_id the same way,
+		// recovering the scid by deserializing the stored announcement blob.
+		let tx = client.transaction().await.unwrap();
+		tx.execute("ALTER TABLE channel_announcements DROP COLUMN short_channel_id", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_announcements ADD COLUMN short_channel_id bigint DEFAULT null", &[]).await.unwrap();
+		loop {
+			let rows = tx.query("SELECT id, announcement_signed FROM channel_announcements WHERE short_channel_id IS NULL LIMIT 10000", &[]).await.unwrap();
+			if rows.is_empty() { break; }
+			let mut updates = FuturesUnordered::new();
+			for row in rows {
+				let id: i32 = row.get("id");
+				let announcement: Vec<u8> = row.get("announcement_signed");
+				let tx_ref = &tx;
+				updates.push(async move {
+					let scid = ChannelAnnouncement::read(&mut Cursor::new(announcement)).unwrap().contents.short_channel_id as i64;
+					// As in the v2 -> v3 step: scids fit in a signed bigint for ~150 years.
+					assert!(scid > 0);
+					tx_ref.execute("UPDATE channel_announcements SET short_channel_id = $1 WHERE id = $2", &[&scid, &id]).await.unwrap();
+				});
+			}
+			while updates.next().await.is_some() { }
+		}
+		tx.execute("ALTER TABLE channel_announcements ADD CONSTRAINT channel_announcements_short_channel_id_key UNIQUE (short_channel_id)", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
+		tx.execute("UPDATE config SET db_schema = 4 WHERE id = 1", &[]).await.unwrap();
+		tx.commit().await.unwrap();
+	}
+	if schema >= 1 && schema <= 4 {
+		// v4 -> v5: composite_index values have a fixed width; shrink the type.
+		let tx = client.transaction().await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER composite_index SET DATA TYPE character(29)", &[]).await.unwrap();
+		tx.execute("UPDATE config SET db_schema = 5 WHERE id = 1", &[]).await.unwrap();
+		tx.commit().await.unwrap();
+	}
+	if schema >= 1 && schema <= 5 {
+		// v5 -> v6: channel flags fit in a smallint; block_height is derivable
+		// from the scid and no longer stored separately.
+		let tx = client.transaction().await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET DATA TYPE smallint", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_announcements DROP COLUMN block_height", &[]).await.unwrap();
+		tx.execute("UPDATE config SET db_schema = 6 WHERE id = 1", &[]).await.unwrap();
+		tx.commit().await.unwrap();
+	}
+	if schema >= 1 && schema <= 6 {
+		// v6 -> v7: drop the now-redundant composite_index, tighten NOT NULL
+		// constraints, and add the unique key used for insert deduplication.
+		let tx = client.transaction().await.unwrap();
+		tx.execute("ALTER TABLE channel_updates DROP COLUMN composite_index", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER timestamp SET NOT NULL", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET NOT NULL", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER disable SET NOT NULL", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER cltv_expiry_delta SET NOT NULL", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER htlc_minimum_msat SET NOT NULL", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER fee_base_msat SET NOT NULL", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER fee_proportional_millionths SET NOT NULL", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER htlc_maximum_msat SET NOT NULL", &[]).await.unwrap();
+		tx.execute("ALTER TABLE channel_updates ALTER blob_signed SET NOT NULL", &[]).await.unwrap();
+		tx.execute("CREATE UNIQUE INDEX channel_updates_key ON channel_updates (short_channel_id, direction, timestamp)", &[]).await.unwrap();
+		tx.execute("UPDATE config SET db_schema = 7 WHERE id = 1", &[]).await.unwrap();
+		tx.commit().await.unwrap();
+	}
+	if schema >= 1 && schema <= 7 {
+		// v7 -> v8: drop indexes superseded by the definitions in
+		// db_index_creation_query(), which runs after this upgrade.
+		// NOTE(review): these DROPs assume every listed index exists on a v7
+		// database and will panic otherwise; consider DROP INDEX IF EXISTS.
+		let tx = client.transaction().await.unwrap();
+		tx.execute("DROP INDEX channels_seen", &[]).await.unwrap();
+		tx.execute("DROP INDEX channel_updates_scid", &[]).await.unwrap();
+		tx.execute("DROP INDEX channel_updates_direction", &[]).await.unwrap();
+		tx.execute("DROP INDEX channel_updates_seen", &[]).await.unwrap();
+		tx.execute("DROP INDEX channel_updates_scid_seen", &[]).await.unwrap();
+		tx.execute("DROP INDEX channel_updates_scid_dir_seen", &[]).await.unwrap();
+		tx.execute("UPDATE config SET db_schema = 8 WHERE id = 1", &[]).await.unwrap();
+		tx.commit().await.unwrap();
+	}
+}
+
/// EDIT ME
pub(crate) fn ln_peers() -> Vec<(PublicKey, SocketAddr)> {
vec![
use std::sync::{Arc, RwLock};
use bitcoin::secp256k1::PublicKey;
+use lightning::ln::features::{InitFeatures, NodeFeatures};
use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, Init, LightningError, NodeAnnouncement, QueryChannelRange, QueryShortChannelIds, ReplyChannelRange, ReplyShortChannelIdsEnd, RoutingMessageHandler};
use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
use lightning::util::events::{MessageSendEvent, MessageSendEventsProvider};
Ok(output_value)
}
- fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
- self.native_router.get_next_channel_announcements(starting_point, batch_amount)
+ fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
+ self.native_router.get_next_channel_announcement(starting_point)
}
- fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement> {
- self.native_router.get_next_node_announcements(starting_point, batch_amount)
+ fn get_next_node_announcement(&self, starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement> {
+ self.native_router.get_next_node_announcement(starting_point)
}
fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) {
fn handle_query_short_channel_ids(&self, their_node_id: &PublicKey, msg: QueryShortChannelIds) -> Result<(), LightningError> {
self.native_router.handle_query_short_channel_ids(their_node_id, msg)
}
+
+ fn provided_init_features(&self, their_node_id: &PublicKey) -> InitFeatures {
+ self.native_router.provided_init_features(their_node_id)
+ }
+
+ fn provided_node_features(&self) -> NodeFeatures {
+ self.native_router.provided_node_features()
+ }
}
Some(out)
}
-#[inline]
-pub fn hex_str(value: &[u8]) -> String {
- let mut res = String::with_capacity(64);
- for v in value {
- res += &format!("{:02x}", v);
- }
- res
-}
-
pub fn to_compressed_pubkey(hex: &str) -> Option<PublicKey> {
let data = match to_vec(&hex[0..33 * 2]) {
Some(bytes) => bytes,
use tokio_postgres::{Client, Connection, NoTls, Socket};
use tokio_postgres::tls::NoTlsStream;
-use crate::{config, hex_utils, TestLogger};
+use crate::{config, TestLogger};
use crate::serialization::MutatedProperties;
/// The delta set needs to be a BTreeMap so the keys are sorted.
let channel_iterator = read_only_graph.channels().into_iter();
channel_iterator
.filter(|c| c.1.announcement_message.is_some())
- .map(|c| hex_utils::hex_str(&c.1.announcement_message.as_ref().unwrap().contents.short_channel_id.to_be_bytes()))
- .collect::<Vec<String>>()
+ .map(|c| c.1.announcement_message.as_ref().unwrap().contents.short_channel_id as i64)
+ .collect::<Vec<_>>()
};
println!("Obtaining corresponding database entries");
// get all the channel announcements that are currently in the network graph
- let announcement_rows = client.query("SELECT short_channel_id, announcement_signed, seen FROM channel_announcements WHERE short_channel_id = any($1) ORDER BY short_channel_id ASC", &[&channel_ids]).await.unwrap();
+ let announcement_rows = client.query("SELECT announcement_signed, seen FROM channel_announcements WHERE short_channel_id = any($1) ORDER BY short_channel_id ASC", &[&channel_ids]).await.unwrap();
for current_announcement_row in announcement_rows {
let blob: Vec<u8> = current_announcement_row.get("announcement_signed");
// here is where the channels whose first update in either direction occurred after
// `last_seen_timestamp` are added to the selection
- let unannounced_rows = client.query("SELECT short_channel_id, blob_signed, seen FROM (SELECT DISTINCT ON (short_channel_id) short_channel_id, blob_signed, seen FROM channel_updates ORDER BY short_channel_id ASC, seen ASC) AS first_seens WHERE first_seens.seen >= $1", &[&last_sync_timestamp_object]).await.unwrap();
+ let unannounced_rows = client.query("SELECT blob_signed, seen FROM (SELECT DISTINCT ON (short_channel_id) short_channel_id, blob_signed, seen FROM channel_updates ORDER BY short_channel_id ASC, seen ASC) AS first_seens WHERE first_seens.seen >= $1", &[&last_sync_timestamp_object]).await.unwrap();
for current_row in unannounced_rows {
let blob: Vec<u8> = current_row.get("blob_signed");
// get the latest channel update in each direction prior to last_sync_timestamp, provided
// there was an update in either direction that happened after the last sync (to avoid
// collecting too many reference updates)
- let reference_rows = client.query("SELECT DISTINCT ON (short_channel_id, direction) id, short_channel_id, direction, blob_signed FROM channel_updates WHERE seen < $1 AND short_channel_id IN (SELECT short_channel_id FROM channel_updates WHERE seen >= $1 GROUP BY short_channel_id) ORDER BY short_channel_id ASC, direction ASC, seen DESC", &[&last_sync_timestamp_object]).await.unwrap();
+ let reference_rows = client.query("SELECT DISTINCT ON (short_channel_id, direction) id, direction, blob_signed FROM channel_updates WHERE seen < $1 AND short_channel_id IN (SELECT short_channel_id FROM channel_updates WHERE seen >= $1 GROUP BY short_channel_id) ORDER BY short_channel_id ASC, direction ASC, seen DESC", &[&last_sync_timestamp_object]).await.unwrap();
println!("Fetched reference rows ({}): {:?}", reference_rows.len(), start.elapsed());
last_seen_update_ids.push(update_id);
non_intermediate_ids.insert(update_id);
- let direction: i32 = current_reference.get("direction");
+ let direction: bool = current_reference.get("direction");
let blob: Vec<u8> = current_reference.get("blob_signed");
let mut readable = Cursor::new(blob);
let unsigned_channel_update = ChannelUpdate::read(&mut readable).unwrap().contents;
let scid = unsigned_channel_update.short_channel_id;
let current_channel_delta = delta_set.entry(scid).or_insert(ChannelDelta::default());
- let mut update_delta = if direction == 0 {
+ let mut update_delta = if !direction {
(*current_channel_delta).updates.0.get_or_insert(DirectedUpdateDelta::default())
- } else if direction == 1 {
- (*current_channel_delta).updates.1.get_or_insert(DirectedUpdateDelta::default())
} else {
- panic!("Channel direction must be binary!")
+ (*current_channel_delta).updates.1.get_or_insert(DirectedUpdateDelta::default())
};
update_delta.last_update_before_seen = Some(unsigned_channel_update);
}
intermediate_update_prefix = "DISTINCT ON (short_channel_id, direction)";
}
- let query_string = format!("SELECT {} id, short_channel_id, direction, blob_signed, seen FROM channel_updates WHERE seen >= $1 ORDER BY short_channel_id ASC, direction ASC, seen DESC", intermediate_update_prefix);
+ let query_string = format!("SELECT {} id, direction, blob_signed, seen FROM channel_updates WHERE seen >= $1 ORDER BY short_channel_id ASC, direction ASC, seen DESC", intermediate_update_prefix);
let intermediate_updates = client.query(&query_string, &[&last_sync_timestamp_object]).await.unwrap();
println!("Fetched intermediate rows ({}): {:?}", intermediate_updates.len(), start.elapsed());
}
intermediate_update_count += 1;
- let direction: i32 = intermediate_update.get("direction");
+ let direction: bool = intermediate_update.get("direction");
let current_seen_timestamp_object: SystemTime = intermediate_update.get("seen");
let current_seen_timestamp: u32 = current_seen_timestamp_object.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs() as u32;
let blob: Vec<u8> = intermediate_update.get("blob_signed");
// get the write configuration for this particular channel's directional details
let current_channel_delta = delta_set.entry(scid).or_insert(ChannelDelta::default());
- let update_delta = if direction == 0 {
+ let update_delta = if !direction {
(*current_channel_delta).updates.0.get_or_insert(DirectedUpdateDelta::default())
- } else if direction == 1 {
- (*current_channel_delta).updates.1.get_or_insert(DirectedUpdateDelta::default())
} else {
- panic!("Channel direction must be binary!")
+ (*current_channel_delta).updates.1.get_or_insert(DirectedUpdateDelta::default())
};
{
// handle the latest deltas
- if direction == 0 && !previously_seen_directions.0 {
+ if !direction && !previously_seen_directions.0 {
previously_seen_directions.0 = true;
update_delta.latest_update_after_seen = Some(UpdateDelta {
seen: current_seen_timestamp,
update: unsigned_channel_update.clone(),
});
- } else if direction == 1 && !previously_seen_directions.1 {
+ } else if direction && !previously_seen_directions.1 {
previously_seen_directions.1 = true;
update_delta.latest_update_after_seen = Some(UpdateDelta {
seen: current_seen_timestamp,
use tokio::sync::mpsc;
use tokio_postgres::NoTls;
-use crate::{config, hex_utils, TestLogger};
+use crate::{config, TestLogger};
use crate::types::GossipMessage;
pub(crate) struct GossipPersister {
pub(crate) async fn persist_gossip(&mut self) {
let connection_config = config::db_connection_config();
- let (client, connection) =
+ let (mut client, connection) =
connection_config.connect(NoTls).await.unwrap();
tokio::spawn(async move {
panic!("db init error: {}", initialization_error);
}
+ let cur_schema = client.query("SELECT db_schema FROM config WHERE id = $1", &[&1]).await.unwrap();
+ if !cur_schema.is_empty() {
+ config::upgrade_db(cur_schema[0].get(0), &mut client).await;
+ }
+
let initialization = client
.execute(
// TODO: figure out a way to fix the id value without Postgres complaining about
match &gossip_message {
GossipMessage::ChannelAnnouncement(announcement) => {
- let scid = announcement.contents.short_channel_id;
- let scid_hex = hex_utils::hex_str(&scid.to_be_bytes());
- // scid is 8 bytes
- // block height is the first three bytes
- // to obtain block height, shift scid right by 5 bytes (40 bits)
- let block_height = (scid >> 5 * 8) as i32;
- let chain_hash = announcement.contents.chain_hash.as_ref();
- let chain_hash_hex = hex_utils::hex_str(chain_hash);
+ let scid = announcement.contents.short_channel_id as i64;
// start with the type prefix, which is already known a priori
let mut announcement_signed = Vec::new();
let result = client
.execute("INSERT INTO channel_announcements (\
short_channel_id, \
- block_height, \
- chain_hash, \
announcement_signed \
- ) VALUES ($1, $2, $3, $4) ON CONFLICT (short_channel_id) DO NOTHING", &[
- &scid_hex,
- &block_height,
- &chain_hash_hex,
+ ) VALUES ($1, $2) ON CONFLICT (short_channel_id) DO NOTHING", &[
+ &scid,
&announcement_signed
]).await;
if result.is_err() {
}
}
GossipMessage::ChannelUpdate(update) => {
- let scid = update.contents.short_channel_id;
- let scid_hex = hex_utils::hex_str(&scid.to_be_bytes());
-
- let chain_hash = update.contents.chain_hash.as_ref();
- let chain_hash_hex = hex_utils::hex_str(chain_hash);
+ let scid = update.contents.short_channel_id as i64;
let timestamp = update.contents.timestamp as i64;
- let channel_flags = update.contents.flags as i32;
- let direction = channel_flags & 1;
- let disable = (channel_flags & 2) > 0;
-
- let composite_index = format!("{}:{}:{}", scid_hex, timestamp, direction);
+ let direction = (update.contents.flags & 1) == 1;
+ let disable = (update.contents.flags & 2) > 0;
let cltv_expiry_delta = update.contents.cltv_expiry_delta as i32;
let htlc_minimum_msat = update.contents.htlc_minimum_msat as i64;
let result = client
.execute("INSERT INTO channel_updates (\
- composite_index, \
- chain_hash, \
short_channel_id, \
timestamp, \
channel_flags, \
fee_proportional_millionths, \
htlc_maximum_msat, \
blob_signed \
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) ON CONFLICT (composite_index) DO NOTHING", &[
- &composite_index,
- &chain_hash_hex,
- &scid_hex,
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) ON CONFLICT DO NOTHING", &[
+ &scid,
×tamp,
- &channel_flags,
+ &(update.contents.flags as i16),
&direction,
&disable,
&cltv_expiry_delta,
use std::collections::HashMap;
use bitcoin::BlockHash;
+use bitcoin::hashes::Hash;
use lightning::ln::msgs::{UnsignedChannelAnnouncement, UnsignedChannelUpdate};
use lightning::util::ser::{BigSize, Writeable};
announcements: vec![],
updates: vec![],
full_update_defaults: Default::default(),
- chain_hash: Default::default(),
+ chain_hash: BlockHash::all_zeros(),
latest_seen: 0,
};
+use std::collections::hash_map::RandomState;
+use std::hash::{BuildHasher, Hasher};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{Duration, Instant};
ErroringMessageHandler, IgnoringMessageHandler, MessageHandler, PeerManager,
};
use lightning::routing::gossip::NetworkGraph;
-use rand::{Rng, thread_rng};
use tokio::sync::mpsc;
use crate::{config, TestLogger};
pub(crate) async fn download_gossip(persistence_sender: mpsc::Sender<GossipMessage>,
completion_sender: mpsc::Sender<()>,
network_graph: Arc<NetworkGraph<TestLogger>>) {
- let mut key = [0; 32];
- let mut random_data = [0; 32];
- thread_rng().fill_bytes(&mut key);
- thread_rng().fill_bytes(&mut random_data);
- let our_node_secret = SecretKey::from_slice(&key).unwrap();
+ let mut key = [42; 32];
+ let mut random_data = [43; 32];
+ // Get something pseudo-random from std.
+ let mut key_hasher = RandomState::new().build_hasher();
+ key_hasher.write_u8(1);
+ key[0..8].copy_from_slice(&key_hasher.finish().to_ne_bytes());
+ let mut rand_hasher = RandomState::new().build_hasher();
+ rand_hasher.write_u8(2);
+ random_data[0..8].copy_from_slice(&rand_hasher.finish().to_ne_bytes());
+ let our_node_secret = SecretKey::from_slice(&key).unwrap();
let router = Arc::new(GossipRouter::new(network_graph, persistence_sender.clone()));
let message_handler = MessageHandler {
chan_handler: ErroringMessageHandler::new(),
route_handler: Arc::clone(&router),
+ onion_message_handler: IgnoringMessageHandler {},
};
let peer_handler = Arc::new(PeerManager::new(
message_handler,
our_node_secret,
+ 0xdeadbeef,
&random_data,
TestLogger::new(),
IgnoringMessageHandler {},
use crate::verifier::ChainVerifier;
pub(crate) type GossipChainAccess = Arc<ChainVerifier>;
-pub(crate) type GossipPeerManager = Arc<PeerManager<lightning_net_tokio::SocketDescriptor, ErroringMessageHandler, Arc<GossipRouter>, TestLogger, IgnoringMessageHandler>>;
+pub(crate) type GossipPeerManager = Arc<PeerManager<lightning_net_tokio::SocketDescriptor, ErroringMessageHandler, Arc<GossipRouter>, IgnoringMessageHandler, TestLogger, IgnoringMessageHandler>>;
#[derive(Debug)]
pub(crate) enum GossipMessage {