Rm ChannelMonitor merge capabilities in favor of explicit add/update
[rust-lightning] / lightning / src / util / test_utils.rs
use chain::chaininterface;
use chain::chaininterface::ConfirmationTarget;
use chain::transaction::OutPoint;
use chain::keysinterface;
use ln::channelmonitor;
use ln::features::InitFeatures;
use ln::msgs;
use ln::msgs::LightningError;
use ln::channelmonitor::HTLCUpdate;
use util::enforcing_trait_impls::EnforcingChannelKeys;
use util::events;
use util::logger::{Logger, Level, Record};
use util::ser::{Readable, ReadableArgs, Writer, Writeable};

use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::script::Script;
use bitcoin_hashes::sha256d::Hash as Sha256dHash;
use bitcoin::network::constants::Network;

use secp256k1::{SecretKey, PublicKey};

use std::time::{SystemTime, UNIX_EPOCH};
use std::sync::{Arc, Mutex};
use std::mem;
use std::collections::{HashMap, HashSet};

/// A `Writer` backed by an in-memory `Vec<u8>`, used to exercise serialization round-trips in tests.
pub struct TestVecWriter(pub Vec<u8>);
impl Writer for TestVecWriter {
        fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
                self.0.extend_from_slice(buf);
                Ok(())
        }
        fn size_hint(&mut self, size: usize) {
                self.0.reserve_exact(size);
        }
}

pub struct TestFeeEstimator {
        pub sat_per_kw: u64,
}
impl chaininterface::FeeEstimator for TestFeeEstimator {
        fn get_est_sat_per_1000_weight(&self, _confirmation_target: ConfirmationTarget) -> u64 {
                self.sat_per_kw
        }
}

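// TestFeeEstimator feeds back a single flat feerate for every confirmation target, giving tests
// deterministic fee behaviour. A minimal usage sketch (the value below is arbitrary, not taken
// from any real test):
//     let fee_estimator = TestFeeEstimator { sat_per_kw: 253 };
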
/// A test wrapper around `SimpleManyChannelMonitor` which records every monitor that is added or
/// updated, tracks the latest update id per channel, and lets tests override the return value of
/// add/update calls via `update_ret` (e.g. to simulate a failing monitor backend).
pub struct TestChannelMonitor<'a> {
        pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>>,
        pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
        pub simple_monitor: channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, &'a chaininterface::BroadcasterInterface>,
        pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
}
impl<'a> TestChannelMonitor<'a> {
        pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: Arc<Logger>, fee_estimator: Arc<chaininterface::FeeEstimator>) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        latest_monitor_update_id: Mutex::new(HashMap::new()),
                        simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, fee_estimator),
                        update_ret: Mutex::new(Ok(())),
                }
        }
}
impl<'a> channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMonitor<'a> {
        fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
                monitor.write_for_disk(&mut w).unwrap();
                assert!(<(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
                                &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1 == monitor);
                w.0.clear();
                monitor.write_for_watchtower(&mut w).unwrap(); // This at least shouldn't crash...
                self.added_monitors.lock().unwrap().push((funding_txo, monitor.clone()));
                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
                assert!(self.simple_monitor.add_monitor(funding_txo, monitor).is_ok());
                self.update_ret.lock().unwrap().clone()
        }

        fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
                // Every monitor update should survive a serialization round-trip.
                let mut w = TestVecWriter(Vec::new());
                update.write(&mut w).unwrap();
                assert!(channelmonitor::ChannelMonitorUpdate::read(
                                &mut ::std::io::Cursor::new(&w.0)).unwrap() == update);

                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
                assert!(self.simple_monitor.update_monitor(funding_txo, update).is_ok());
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let monitors = self.simple_monitor.monitors.lock().unwrap();
                let monitor = monitors.get(&funding_txo).unwrap();
                w.0.clear();
                monitor.write_for_disk(&mut w).unwrap();
                assert!(<(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
                                &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1 == *monitor);
                w.0.clear();
                monitor.write_for_watchtower(&mut w).unwrap(); // This at least shouldn't crash...
                self.added_monitors.lock().unwrap().push((funding_txo, monitor.clone()));
                self.update_ret.lock().unwrap().clone()
        }

        fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
                self.simple_monitor.get_and_clear_pending_htlcs_updated()
        }
}

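// Since add_monitor and update_monitor both return whatever is stored in `update_ret`, a test can
// simulate a failing monitor backend before triggering a channel update. A minimal sketch, assuming
// a `chan_monitor: &TestChannelMonitor` from the surrounding test fixture:
//     *chan_monitor.update_ret.lock().unwrap() =
//             Err(channelmonitor::ChannelMonitorUpdateErr::TemporaryFailure);
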
pub struct TestBroadcaster {
        pub txn_broadcasted: Mutex<Vec<Transaction>>,
        pub broadcasted_txn: Mutex<HashSet<Sha256dHash>> // Temporary field while refactoring out tx duplication
}
impl chaininterface::BroadcasterInterface for TestBroadcaster {
        fn broadcast_transaction(&self, tx: &Transaction) {
                {
                        if let Some(_) = self.broadcasted_txn.lock().unwrap().get(&tx.txid()) {
                                // If this is a commitment tx, HTLC-Timeout or HTLC-Success, duplicate broadcasts are still ok
                                if tx.input[0].sequence == 0xfffffffd {
                                        return;
                                }
                        }
                }
                self.broadcasted_txn.lock().unwrap().insert(tx.txid());
                self.txn_broadcasted.lock().unwrap().push(tx.clone());
        }
}

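// Tests generally drain `txn_broadcasted` after driving a scenario to assert on exactly which
// transactions were handed to the broadcaster. A minimal sketch, assuming a
// `broadcaster: &TestBroadcaster` in scope:
//     let txn = broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
//     assert_eq!(txn.len(), 1);
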
pub struct TestChannelMessageHandler {
        pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
}

impl TestChannelMessageHandler {
        pub fn new() -> Self {
                TestChannelMessageHandler {
                        pending_events: Mutex::new(Vec::new()),
                }
        }
}

impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
        fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &msgs::OpenChannel) {}
        fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &msgs::AcceptChannel) {}
        fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) {}
        fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) {}
        fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) {}
        fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) {}
        fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) {}
        fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) {}
        fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) {}
        fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) {}
        fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) {}
        fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) {}
        fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) {}
        fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) {}
        fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) {}
        fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) {}
        fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
        fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {}
        fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
}

impl events::MessageSendEventsProvider for TestChannelMessageHandler {
        fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
                let mut pending_events = self.pending_events.lock().unwrap();
                let mut ret = Vec::new();
                mem::swap(&mut ret, &mut *pending_events);
                ret
        }
}

pub struct TestRoutingMessageHandler {}

impl TestRoutingMessageHandler {
        pub fn new() -> Self {
                TestRoutingMessageHandler {}
        }
}
impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
        fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
                Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
        }
        fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
                Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
        }
        fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> {
                Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
        }
        fn handle_htlc_fail_channel_update(&self, _update: &msgs::HTLCFailChannelUpdate) {}
        fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate)> {
                Vec::new()
        }
        fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
                Vec::new()
        }
        fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
                true
        }
}

/// A `Logger` implementation which counts how many times each (module, message) pair was logged,
/// so tests can assert that specific log lines were emitted.
pub struct TestLogger {
        level: Level,
        id: String,
        pub lines: Mutex<HashMap<(String, String), usize>>,
}

impl TestLogger {
        pub fn new() -> TestLogger {
                Self::with_id("".to_owned())
        }
        pub fn with_id(id: String) -> TestLogger {
                TestLogger {
                        level: Level::Trace,
                        id,
                        lines: Mutex::new(HashMap::new())
                }
        }
        pub fn enable(&mut self, level: Level) {
                self.level = level;
        }
        pub fn assert_log(&self, module: String, line: String, count: usize) {
                let log_entries = self.lines.lock().unwrap();
                assert_eq!(log_entries.get(&(module, line)), Some(&count));
        }
}

impl Logger for TestLogger {
        fn log(&self, record: &Record) {
                *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
                if self.level >= record.level {
                        println!("{:<5} {} [{} : {}, {}] {}", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args);
                }
        }
}

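// A sketch of asserting on captured log output once a scenario has run; the module path and
// message below are placeholders rather than real log lines:
//     logger.assert_log("lightning::ln::channelmanager".to_string(),
//             "Some expected message".to_string(), 1);
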
/// A `KeysInterface` which defers to a real `KeysManager` but lets tests override the onion
/// session key and the channel id, making otherwise-random values deterministic.
pub struct TestKeysInterface {
        backing: keysinterface::KeysManager,
        pub override_session_priv: Mutex<Option<SecretKey>>,
        pub override_channel_id_priv: Mutex<Option<[u8; 32]>>,
}

impl keysinterface::KeysInterface for TestKeysInterface {
        type ChanKeySigner = EnforcingChannelKeys;

        fn get_node_secret(&self) -> SecretKey { self.backing.get_node_secret() }
        fn get_destination_script(&self) -> Script { self.backing.get_destination_script() }
        fn get_shutdown_pubkey(&self) -> PublicKey { self.backing.get_shutdown_pubkey() }
        fn get_channel_keys(&self, inbound: bool, channel_value_satoshis: u64) -> EnforcingChannelKeys {
                EnforcingChannelKeys::new(self.backing.get_channel_keys(inbound, channel_value_satoshis))
        }

        fn get_onion_rand(&self) -> (SecretKey, [u8; 32]) {
                match *self.override_session_priv.lock().unwrap() {
                        Some(key) => (key.clone(), [0; 32]),
                        None => self.backing.get_onion_rand()
                }
        }

        fn get_channel_id(&self) -> [u8; 32] {
                match *self.override_channel_id_priv.lock().unwrap() {
                        Some(key) => key.clone(),
                        None => self.backing.get_channel_id()
                }
        }
}

impl TestKeysInterface {
        pub fn new(seed: &[u8; 32], network: Network, logger: Arc<Logger>) -> Self {
                let now = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards");
                Self {
                        backing: keysinterface::KeysManager::new(seed, network, logger, now.as_secs(), now.subsec_nanos()),
                        override_session_priv: Mutex::new(None),
                        override_channel_id_priv: Mutex::new(None),
                }
        }
}
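
// Construction and override sketch; the values are illustrative rather than taken from a real test:
//     let seed = [42u8; 32];
//     let keys_manager = TestKeysInterface::new(&seed, Network::Testnet, Arc::new(TestLogger::new()));
//     // Pin the onion session key for the next send, given some `session_priv: SecretKey`:
//     *keys_manager.override_session_priv.lock().unwrap() = Some(session_priv);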