use chain::chaininterface;
use chain::chaininterface::ConfirmationTarget;
use chain::chainmonitor;
+use chain::chainmonitor::MonitorUpdateId;
use chain::channelmonitor;
use chain::channelmonitor::MonitorEvent;
use chain::transaction::OutPoint;
use ln::msgs;
use ln::msgs::OptionalField;
use ln::script::ShutdownScript;
+use routing::scorer::{Eternity, ScorerUsingTime};
use util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use util::events;
use util::logger::{Logger, Level, Record};
self.0.extend_from_slice(buf);
Ok(())
}
- fn size_hint(&mut self, size: usize) {
- self.0.reserve_exact(size);
- }
}
pub struct TestFeeEstimator {
pub struct TestChainMonitor<'a> {
pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingSigner>)>>,
- pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
- pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a channelmonitor::Persist<EnforcingSigner>>,
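+ /// For each channel_id, the funding outpoint, the latest update_id, and the MonitorUpdateId
+ /// of the most recent monitor we were handed via watch_channel or update_channel.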
+ pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64, MonitorUpdateId)>>,
+ pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<EnforcingSigner>>,
pub keys_manager: &'a TestKeysInterface,
- pub update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
- /// If this is set to Some(), after the next return, we'll always return this until update_ret
- /// is changed:
- pub next_update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
/// If this is set to Some(), the next update_channel call (not watch_channel) must be a
/// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
/// boolean.
pub expect_channel_force_closed: Mutex<Option<([u8; 32], bool)>>,
}
impl<'a> TestChainMonitor<'a> {
- pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a channelmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+ pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
Self {
added_monitors: Mutex::new(Vec::new()),
latest_monitor_update_id: Mutex::new(HashMap::new()),
chain_monitor: chainmonitor::ChainMonitor::new(chain_source, broadcaster, logger, fee_estimator, persister),
keys_manager,
- update_ret: Mutex::new(None),
- next_update_ret: Mutex::new(None),
expect_channel_force_closed: Mutex::new(None),
}
}
}
impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
- fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
// At every point where we get a monitor update, we should be able to send a useful monitor
// to a watchtower and disk...
let mut w = TestVecWriter(Vec::new());
monitor.write(&mut w).unwrap();
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
&mut io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
assert!(new_monitor == monitor);
- self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
+ self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+ (funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor)));
self.added_monitors.lock().unwrap().push((funding_txo, monitor));
- let watch_res = self.chain_monitor.watch_channel(funding_txo, new_monitor);
-
- let ret = self.update_ret.lock().unwrap().clone();
- if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
- *self.update_ret.lock().unwrap() = Some(next_ret);
- }
- if ret.is_some() {
- assert!(watch_res.is_ok());
- return ret.unwrap();
- }
- watch_res
+ self.chain_monitor.watch_channel(funding_txo, new_monitor)
}
- fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
// Every monitor update should survive roundtrip
let mut w = TestVecWriter(Vec::new());
update.write(&mut w).unwrap();
} else { panic!(); }
}
- self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
+ self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+ (funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(&update)));
let update_res = self.chain_monitor.update_channel(funding_txo, update);
// At every point where we get a monitor update, we should be able to send a useful monitor
// to a watchtower and disk...
- let monitors = self.chain_monitor.monitors.read().unwrap();
- let monitor = monitors.get(&funding_txo).unwrap();
+ let monitor = self.chain_monitor.get_monitor(funding_txo).unwrap();
w.0.clear();
monitor.write(&mut w).unwrap();
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
&mut io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
assert!(new_monitor == *monitor);
self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
-
- let ret = self.update_ret.lock().unwrap().clone();
- if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
- *self.update_ret.lock().unwrap() = Some(next_ret);
- }
- if ret.is_some() {
- assert!(update_res.is_ok());
- return ret.unwrap();
- }
update_res
}
}
pub struct TestPersister {
- pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>
+ pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
+ /// If this is set to Some(), after the next return we'll always return this value until
+ /// update_ret is changed.
+ pub next_update_ret: Mutex<Option<Result<(), chain::ChannelMonitorUpdateErr>>>,
+ /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
+ /// MonitorUpdateId here.
+ pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
}
impl TestPersister {
pub fn new() -> Self {
Self {
- update_ret: Mutex::new(Ok(()))
+ update_ret: Mutex::new(Ok(())),
+ next_update_ret: Mutex::new(None),
+ chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
}
}
- pub fn set_update_ret(&self, ret: Result<(), channelmonitor::ChannelMonitorUpdateErr>) {
+ pub fn set_update_ret(&self, ret: Result<(), chain::ChannelMonitorUpdateErr>) {
*self.update_ret.lock().unwrap() = ret;
}
+
+ pub fn set_next_update_ret(&self, next_ret: Option<Result<(), chain::ChannelMonitorUpdateErr>>) {
+ *self.next_update_ret.lock().unwrap() = next_ret;
+ }
}
-impl<Signer: keysinterface::Sign> channelmonitor::Persist<Signer> for TestPersister {
- fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
- self.update_ret.lock().unwrap().clone()
+impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
+ fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
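+ // Return the currently-configured result; if a next_update_ret is queued, promote it so
+ // subsequent calls return it until update_ret is changed again.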
+ let ret = self.update_ret.lock().unwrap().clone();
+ if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
+ *self.update_ret.lock().unwrap() = next_ret;
+ }
+ ret
}
- fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &channelmonitor::ChannelMonitorUpdate, _data: &channelmonitor::ChannelMonitor<Signer>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
- self.update_ret.lock().unwrap().clone()
+ fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ let ret = self.update_ret.lock().unwrap().clone();
+ if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
+ *self.update_ret.lock().unwrap() = next_ret;
+ }
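+ // An update of None indicates this persistence was triggered by a chain sync rather than
+ // an off-chain ChannelMonitorUpdate, so track its MonitorUpdateId per funding outpoint.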
+ if update.is_none() {
+ self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
+ }
+ ret
}
}
pub struct TestRoutingMessageHandler {
pub chan_upds_recvd: AtomicUsize,
pub chan_anns_recvd: AtomicUsize,
- pub chan_anns_sent: AtomicUsize,
pub request_full_sync: AtomicBool,
}
TestRoutingMessageHandler {
chan_upds_recvd: AtomicUsize::new(0),
chan_anns_recvd: AtomicUsize::new(0),
- chan_anns_sent: AtomicUsize::new(0),
request_full_sync: AtomicBool::new(false),
}
}
}
fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
let mut chan_anns = Vec::new();
- const TOTAL_UPDS: u64 = 100;
- let end: u64 = cmp::min(starting_point + batch_amount as u64, TOTAL_UPDS - self.chan_anns_sent.load(Ordering::Acquire) as u64);
+ const TOTAL_UPDS: u64 = 50;
+ let end: u64 = cmp::min(starting_point + batch_amount as u64, TOTAL_UPDS);
for i in starting_point..end {
let chan_upd_1 = get_dummy_channel_update(i);
let chan_upd_2 = get_dummy_channel_update(i);
chan_anns.push((chan_ann, Some(chan_upd_1), Some(chan_upd_2)));
}
- self.chan_anns_sent.fetch_add(chan_anns.len(), Ordering::AcqRel);
chan_anns
}
.finish()
}
}
+
+/// A scorer useful in testing, when the passage of time isn't a concern.
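+/// (`Eternity` never advances, so time-based penalty decay never occurs.)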
+pub type TestScorer = ScorerUsingTime<Eternity>;