Dedup RemoteTxCache by removing OnchainTxHandler copy
[rust-lightning] / lightning / src / ln / onchaintx.rs
1 //! The logic to build claims and bump in-flight transactions until confirmation.
2 //!
3 //! OnchainTxHandler objects are an integral part of ChannelMonitor and encapsulate all the
4 //! transaction building, tracking, bumping and notification functions.
5
6 use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
7 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
8 use bitcoin::blockdata::script::Script;
9
10 use bitcoin::hash_types::Txid;
11
12 use bitcoin::secp256k1::{Secp256k1, Signature};
13 use bitcoin::secp256k1;
14
15 use ln::msgs::DecodeError;
16 use ln::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial, ClaimRequest};
17 use ln::channelmanager::PaymentPreimage;
18 use ln::chan_utils;
19 use ln::chan_utils::{TxCreationKeys, LocalCommitmentTransaction};
20 use chain::chaininterface::{FeeEstimator, BroadcasterInterface, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
21 use chain::keysinterface::ChannelKeys;
22 use util::logger::Logger;
23 use util::ser::{ReadableArgs, Readable, Writer, Writeable};
24 use util::byte_utils;
25
26 use std::collections::{HashMap, hash_map};
27 use std::sync::Arc;
28 use std::cmp;
29 use std::ops::Deref;
30
31 const MAX_ALLOC_SIZE: usize = 64*1024;
32
33 /// When ChannelMonitor discovers an onchain transaction of one of these classes, we may have to take
34 /// action on it once it matures to enough confirmations (ANTI_REORG_DELAY).
35 #[derive(Clone, PartialEq)]
36 enum OnchainEvent {
37         /// Outpoint under claim by one of our own txn; once it gets enough confirmations we remove it from the
38         /// bump-txn candidate buffer.
39         Claim {
40                 claim_request: Txid,
41         },
42         /// A claim tx may aggregate multiple claimable outpoints, and one of those outpoints may be claimed by a
43         /// remote party's tx instead. In that case we need to drop the outpoint and regenerate a new claim tx. For
44         /// safety, we keep tracking the outpoint so we can resurrect it back into the claim tx if a reorg happens.
45         ContentiousOutpoint {
46                 outpoint: BitcoinOutPoint,
47                 input_material: InputMaterial,
48         }
49 }
50
51 /// Higher-level cache structure used to re-generate bumped claim txn if needed
52 #[derive(Clone, PartialEq)]
53 pub struct ClaimTxBumpMaterial {
54         // Checked at every block tick to see whether the pending claim tx is taking too
55         // long to confirm and needs to be bumped.
56         height_timer: Option<u32>,
57         // Tracked in case of reorg, to wipe out now-superfluous bump material
58         feerate_previous: u64,
59         // Soonest timelock among the set of outpoints claimed, used to compute the height timer
60         // (i.e. the claim's bump urgency), not its feerate
61         soonest_timelock: u32,
62         // Cache of the scripts, pubkeys, sigs or keys needed to satisfy the claimable outputs' scriptpubkeys.
63         per_input_material: HashMap<BitcoinOutPoint, InputMaterial>,
64 }
65
66 impl Writeable for ClaimTxBumpMaterial  {
67         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
68                 self.height_timer.write(writer)?;
69                 writer.write_all(&byte_utils::be64_to_array(self.feerate_previous))?;
70                 writer.write_all(&byte_utils::be32_to_array(self.soonest_timelock))?;
71                 writer.write_all(&byte_utils::be64_to_array(self.per_input_material.len() as u64))?;
72                 for (outp, tx_material) in self.per_input_material.iter() {
73                         outp.write(writer)?;
74                         tx_material.write(writer)?;
75                 }
76                 Ok(())
77         }
78 }
79
80 impl Readable for ClaimTxBumpMaterial {
81         fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
82                 let height_timer = Readable::read(reader)?;
83                 let feerate_previous = Readable::read(reader)?;
84                 let soonest_timelock = Readable::read(reader)?;
85                 let per_input_material_len: u64 = Readable::read(reader)?;
86                 let mut per_input_material = HashMap::with_capacity(cmp::min(per_input_material_len as usize, MAX_ALLOC_SIZE / 128));
87                 for _ in 0 ..per_input_material_len {
88                         let outpoint = Readable::read(reader)?;
89                         let input_material = Readable::read(reader)?;
90                         per_input_material.insert(outpoint, input_material);
91                 }
92                 Ok(Self { height_timer, feerate_previous, soonest_timelock, per_input_material })
93         }
94 }
95
96 #[derive(PartialEq, Clone, Copy)]
97 pub(crate) enum InputDescriptors {
98         RevokedOfferedHTLC,
99         RevokedReceivedHTLC,
100         OfferedHTLC,
101         ReceivedHTLC,
102         RevokedOutput, // either a revoked to_local output on commitment tx, a revoked HTLC-Timeout output or a revoked HTLC-Success output
103 }
104
105 impl Writeable for InputDescriptors {
106         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
107                 match self {
108                         &InputDescriptors::RevokedOfferedHTLC => {
109                                 writer.write_all(&[0; 1])?;
110                         },
111                         &InputDescriptors::RevokedReceivedHTLC => {
112                                 writer.write_all(&[1; 1])?;
113                         },
114                         &InputDescriptors::OfferedHTLC => {
115                                 writer.write_all(&[2; 1])?;
116                         },
117                         &InputDescriptors::ReceivedHTLC => {
118                                 writer.write_all(&[3; 1])?;
119                         }
120                         &InputDescriptors::RevokedOutput => {
121                                 writer.write_all(&[4; 1])?;
122                         }
123                 }
124                 Ok(())
125         }
126 }
127
128 impl Readable for InputDescriptors {
129         fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
130                 let input_descriptor = match <u8 as Readable>::read(reader)? {
131                         0 => {
132                                 InputDescriptors::RevokedOfferedHTLC
133                         },
134                         1 => {
135                                 InputDescriptors::RevokedReceivedHTLC
136                         },
137                         2 => {
138                                 InputDescriptors::OfferedHTLC
139                         },
140                         3 => {
141                                 InputDescriptors::ReceivedHTLC
142                         },
143                         4 => {
144                                 InputDescriptors::RevokedOutput
145                         }
146                         _ => return Err(DecodeError::InvalidValue),
147                 };
148                 Ok(input_descriptor)
149         }
150 }
151
152 macro_rules! subtract_high_prio_fee {
153         ($self: ident, $fee_estimator: expr, $value: expr, $predicted_weight: expr, $used_feerate: expr) => {
154                 {
155                         $used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority);
156                         let mut fee = $used_feerate * ($predicted_weight as u64) / 1000;
157                         if $value <= fee {
158                                 $used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
159                                 fee = $used_feerate * ($predicted_weight as u64) / 1000;
160                                 if $value <= fee {
161                                         $used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
162                                         fee = $used_feerate * ($predicted_weight as u64) / 1000;
163                                         if $value <= fee {
164                                                 log_error!($self, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
165                                                         fee, $value);
166                                                 false
167                                         } else {
168                                                 log_warn!($self, "Used low priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
169                                                         $value);
170                                                 $value -= fee;
171                                                 true
172                                         }
173                                 } else {
174                                         log_warn!($self, "Used medium priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
175                                                 $value);
176                                         $value -= fee;
177                                         true
178                                 }
179                         } else {
180                                 $value -= fee;
181                                 true
182                         }
183                 }
184         }
185 }
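// Illustrative walk-through of subtract_high_prio_fee! with hypothetical feerates: for $value = 10_000 sats and
// $predicted_weight = 600, a HighPriority estimate of 20_000 sat/kW gives a fee of 12_000 sats (more than the whole
// value), so the macro falls back to Normal; at a Normal estimate of 10_000 sat/kW the fee is 6_000 sats, $value is
// reduced to 4_000 sats, $used_feerate is set to 10_000 and the macro evaluates to true. If even the Background fee
// were at least $value, the macro would log an error and evaluate to false without modifying $value.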
186
187 impl Readable for Option<Vec<Option<(usize, Signature)>>> {
188         fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
189                 match Readable::read(reader)? {
190                         0u8 => Ok(None),
191                         1u8 => {
192                                 let vlen: u64 = Readable::read(reader)?;
193                                 let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::std::mem::size_of::<Option<(usize, Signature)>>()));
194                                 for _ in 0..vlen {
195                                         ret.push(match Readable::read(reader)? {
196                                                 0u8 => None,
197                                                 1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
198                                                 _ => return Err(DecodeError::InvalidValue)
199                                         });
200                                 }
201                                 Ok(Some(ret))
202                         },
203                         _ => Err(DecodeError::InvalidValue),
204                 }
205         }
206 }
207
208 impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
209         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
210                 match self {
211                         &Some(ref vec) => {
212                                 1u8.write(writer)?;
213                                 (vec.len() as u64).write(writer)?;
214                                 for opt in vec.iter() {
215                                         match opt {
216                                                 &Some((ref idx, ref sig)) => {
217                                                         1u8.write(writer)?;
218                                                         (*idx as u64).write(writer)?;
219                                                         sig.write(writer)?;
220                                                 },
221                                                 &None => 0u8.write(writer)?,
222                                         }
223                                 }
224                         },
225                         &None => 0u8.write(writer)?,
226                 }
227                 Ok(())
228         }
229 }
230
231
232 /// OnchainTxHandler receives claiming requests, aggregates them when it is sound to do so, broadcasts the
233 /// resulting transactions and RBF-bumps them if possible.
234 pub struct OnchainTxHandler<ChanSigner: ChannelKeys> {
235         destination_script: Script,
236         local_commitment: Option<LocalCommitmentTransaction>,
237         // local_htlc_sigs and prev_local_htlc_sigs are in the order in which they appear in the commitment
238         // transaction outputs (hence the Option<>s inside the Vec). The first usize is the index in
239         // the set of HTLCs in the LocalCommitmentTransaction (including those which do not appear in
240         // the commitment transaction).
241         local_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
242         prev_local_commitment: Option<LocalCommitmentTransaction>,
243         prev_local_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
244         local_csv: u16,
245         remote_csv: u16,
246
247         key_storage: ChanSigner,
248
249         // Used to track claiming requests. If claim tx doesn't confirm before height timer expiration we need to bump
250         // it (RBF or CPFP). If an input has been part of an aggregate tx at first claim try, we need to keep it within
251         // another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in flight for the
252         // same set of outpoints, and one of those outpoints may be spent by a transaction not issued by us. That's why,
253         // at block connection, we scan all inputs and, if any of them belongs to the set of a claiming request, we test
254         // for set equality between the spending transaction and the claim request. If they are equal, the transaction
255         // was one of our claiming txn and, after a security delay of 6 blocks, we remove the pending claim request. If
256         // not, we need to regenerate a new claim request with the reduced set of still-claimable outpoints.
257         // The key is the identifier of the pending claim request, i.e. the txid of the initial claiming transaction
258         // generated by us, and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
259         // The entry is a cache of the elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial). An illustrative example of these maps follows the struct definition.
260         #[cfg(test)] // Used in functional_test to verify sanitization
261         pub pending_claim_requests: HashMap<Txid, ClaimTxBumpMaterial>,
262         #[cfg(not(test))]
263         pending_claim_requests: HashMap<Txid, ClaimTxBumpMaterial>,
264
265         // Used to link outpoints claimed in a connected block to a pending claim request.
266         // The key is an outpoint which monitor parsing has detected we have the keys/scripts to claim.
267         // The value is (pending claim request identifier, confirmation_block); the identifier
268         // is the txid of the initial claiming transaction and is immutable until the outpoint is
269         // post-anti-reorg-delay solved, and confirmation_block is used to erase the entry if the
270         // block containing the output gets disconnected.
271         #[cfg(test)] // Used in functional_test to verify sanitization
272         pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
273         #[cfg(not(test))]
274         claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
275
276         onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,
277
278         secp_ctx: Secp256k1<secp256k1::All>,
279         logger: Arc<Logger>
280 }
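// Illustrative example of how the two maps above relate (hypothetical values): if a claim request covering
// outpoints O1 and O2 first yields a claim transaction with txid T at height H, then pending_claim_requests
// maps T to the ClaimTxBumpMaterial for {O1, O2} while claimable_outpoints maps both O1 and O2 to (T, H).
// Later bumps of the claim tx keep T as the key; the entries are only removed once the outpoints of the set
// are post-anti-reorg-delay solved.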
281
282 impl<ChanSigner: ChannelKeys + Writeable> OnchainTxHandler<ChanSigner> {
283         pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
284                 self.destination_script.write(writer)?;
285                 self.local_commitment.write(writer)?;
286                 self.local_htlc_sigs.write(writer)?;
287                 self.prev_local_commitment.write(writer)?;
288                 self.prev_local_htlc_sigs.write(writer)?;
289
290                 self.local_csv.write(writer)?;
291
292                 self.remote_csv.write(writer)?;
293
294                 self.key_storage.write(writer)?;
295
296                 writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
297                 for (ref ancestor_claim_txid, claim_tx_data) in self.pending_claim_requests.iter() {
298                         ancestor_claim_txid.write(writer)?;
299                         claim_tx_data.write(writer)?;
300                 }
301
302                 writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
303                 for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
304                         outp.write(writer)?;
305                         claim_and_height.0.write(writer)?;
306                         claim_and_height.1.write(writer)?;
307                 }
308
309                 writer.write_all(&byte_utils::be64_to_array(self.onchain_events_waiting_threshold_conf.len() as u64))?;
310                 for (ref target, ref events) in self.onchain_events_waiting_threshold_conf.iter() {
311                         writer.write_all(&byte_utils::be32_to_array(**target))?;
312                         writer.write_all(&byte_utils::be64_to_array(events.len() as u64))?;
313                         for ev in events.iter() {
314                                 match *ev {
315                                         OnchainEvent::Claim { ref claim_request } => {
316                                                 writer.write_all(&[0; 1])?;
317                                                 claim_request.write(writer)?;
318                                         },
319                                         OnchainEvent::ContentiousOutpoint { ref outpoint, ref input_material } => {
320                                                 writer.write_all(&[1; 1])?;
321                                                 outpoint.write(writer)?;
322                                                 input_material.write(writer)?;
323                                         }
324                                 }
325                         }
326                 }
327                 Ok(())
328         }
329 }
330
331 impl<ChanSigner: ChannelKeys + Readable> ReadableArgs<Arc<Logger>> for OnchainTxHandler<ChanSigner> {
332         fn read<R: ::std::io::Read>(reader: &mut R, logger: Arc<Logger>) -> Result<Self, DecodeError> {
333                 let destination_script = Readable::read(reader)?;
334
335                 let local_commitment = Readable::read(reader)?;
336                 let local_htlc_sigs = Readable::read(reader)?;
337                 let prev_local_commitment = Readable::read(reader)?;
338                 let prev_local_htlc_sigs = Readable::read(reader)?;
339
340                 let local_csv = Readable::read(reader)?;
341
342                 let remote_csv = Readable::read(reader)?;
343
344                 let key_storage = Readable::read(reader)?;
345
346                 let pending_claim_requests_len: u64 = Readable::read(reader)?;
347                 let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
348                 for _ in 0..pending_claim_requests_len {
349                         pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
350                 }
351
352                 let claimable_outpoints_len: u64 = Readable::read(reader)?;
353                 let mut claimable_outpoints = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
354                 for _ in 0..claimable_outpoints_len {
355                         let outpoint = Readable::read(reader)?;
356                         let ancestor_claim_txid = Readable::read(reader)?;
357                         let height = Readable::read(reader)?;
358                         claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
359                 }
360                 let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
361                 let mut onchain_events_waiting_threshold_conf = HashMap::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
362                 for _ in 0..waiting_threshold_conf_len {
363                         let height_target = Readable::read(reader)?;
364                         let events_len: u64 = Readable::read(reader)?;
365                         let mut events = Vec::with_capacity(cmp::min(events_len as usize, MAX_ALLOC_SIZE / 128));
366                         for _ in 0..events_len {
367                                 let ev = match <u8 as Readable>::read(reader)? {
368                                         0 => {
369                                                 let claim_request = Readable::read(reader)?;
370                                                 OnchainEvent::Claim {
371                                                         claim_request
372                                                 }
373                                         },
374                                         1 => {
375                                                 let outpoint = Readable::read(reader)?;
376                                                 let input_material = Readable::read(reader)?;
377                                                 OnchainEvent::ContentiousOutpoint {
378                                                         outpoint,
379                                                         input_material
380                                                 }
381                                         }
382                                         _ => return Err(DecodeError::InvalidValue),
383                                 };
384                                 events.push(ev);
385                         }
386                         onchain_events_waiting_threshold_conf.insert(height_target, events);
387                 }
388
389                 Ok(OnchainTxHandler {
390                         destination_script,
391                         local_commitment,
392                         local_htlc_sigs,
393                         prev_local_commitment,
394                         prev_local_htlc_sigs,
395                         local_csv,
396                         remote_csv,
397                         key_storage,
398                         claimable_outpoints,
399                         pending_claim_requests,
400                         onchain_events_waiting_threshold_conf,
401                         secp_ctx: Secp256k1::new(),
402                         logger,
403                 })
404         }
405 }
406
407 impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
408         pub(super) fn new(destination_script: Script, keys: ChanSigner, local_csv: u16, remote_csv: u16, logger: Arc<Logger>) -> Self {
409
410                 let key_storage = keys;
411
412                 OnchainTxHandler {
413                         destination_script,
414                         local_commitment: None,
415                         local_htlc_sigs: None,
416                         prev_local_commitment: None,
417                         prev_local_htlc_sigs: None,
418                         local_csv,
419                         remote_csv,
420                         key_storage,
421                         pending_claim_requests: HashMap::new(),
422                         claimable_outpoints: HashMap::new(),
423                         onchain_events_waiting_threshold_conf: HashMap::new(),
424
425                         secp_ctx: Secp256k1::new(),
426                         logger,
427                 }
428         }
429
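        /// Returns the predicted total witness weight for spending the given inputs, including the 2 weight units
        /// for the segwit marker and flag. For example, a single `InputDescriptors::RevokedOutput` input yields
        /// 2 + (1 + 1 + 73 + 1 + 1 + 1 + 77) = 157 weight units, per the per-input breakdown below.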
430         pub(super) fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize {
431                 let mut tx_weight = 2; // count segwit flags
432                 for inp in inputs {
433                         // We use expected weight (and not actual) as signatures and time lock delays may vary
434                         tx_weight +=  match inp {
435                                 // number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
436                                 &InputDescriptors::RevokedOfferedHTLC => {
437                                         1 + 1 + 73 + 1 + 33 + 1 + 133
438                                 },
439                                 // number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
440                                 &InputDescriptors::RevokedReceivedHTLC => {
441                                         1 + 1 + 73 + 1 + 33 + 1 + 139
442                                 },
443                                 // number_of_witness_elements + sig_length + remotehtlc_sig  + preimage_length + preimage + witness_script_length + witness_script
444                                 &InputDescriptors::OfferedHTLC => {
445                                         1 + 1 + 73 + 1 + 32 + 1 + 133
446                                 },
447                                 // number_of_witness_elements + sig_length + remotehtlc_sig + empty_vec_length + empty_vec + witness_script_length + witness_script
448                                 &InputDescriptors::ReceivedHTLC => {
449                                         1 + 1 + 73 + 1 + 1 + 1 + 139
450                                 },
451                                 // number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
452                                 &InputDescriptors::RevokedOutput => {
453                                         1 + 1 + 73 + 1 + 1 + 1 + 77
454                                 },
455                         };
456                 }
457                 tx_weight
458         }
459
460         /// In LN, claimed outputs are time-sensitive: we have to spend them before some timelock expires. When we detect an
461         /// in-channel output, we generate a first version of a claim tx and associate a height timer with it. A height timer is
462         /// an absolute block height at which we should generate a new bumped "version" of the claim tx, to make sure we safely
463         /// claim the outputs before our counterparty can. If the timelock expires soon, the height timer is scaled down
464         /// accordingly to increase the bump frequency and so improve our odds of success.
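        ///
        /// For example, with the thresholds below and `current_height = 100`: a timelock expiring at block 102 yields
        /// a timer of 101 (bump at the next block), one expiring at 110 yields 103, and one expiring at 200 yields 115.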
465         fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
466                 if timelock_expiration <= current_height + 3 {
467                         return current_height + 1
468                 } else if timelock_expiration - current_height <= 15 {
469                         return current_height + 3
470                 }
471                 current_height + 15
472         }
473
474         /// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a counterparty onchain) relies on claim transactions getting confirmed before timelock expiration
475         /// (CSV or CLTV, depending on the case). During high-fee spikes, a claim tx may get stuck in the mempool, so its feerate needs to be bumped quickly using Replace-By-Fee or Child-Pays-For-Parent.
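        ///
        /// Returns `None` if no claim transaction could be built; otherwise returns the new height timer (`None` for
        /// local transactions, which cannot be bumped without anchor outputs), the effective feerate and the claim
        /// transaction itself.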
476         fn generate_claim_tx<F: Deref>(&mut self, height: u32, cached_claim_datas: &ClaimTxBumpMaterial, fee_estimator: F) -> Option<(Option<u32>, u64, Transaction)>
477                 where F::Target: FeeEstimator
478         {
479                 if cached_claim_datas.per_input_material.len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
480                 let mut inputs = Vec::new();
481                 for outp in cached_claim_datas.per_input_material.keys() {
482                         log_trace!(self, "Outpoint {}:{}", outp.txid, outp.vout);
483                         inputs.push(TxIn {
484                                 previous_output: *outp,
485                                 script_sig: Script::new(),
486                                 sequence: 0xfffffffd,
487                                 witness: Vec::new(),
488                         });
489                 }
490                 let mut bumped_tx = Transaction {
491                         version: 2,
492                         lock_time: 0,
493                         input: inputs,
494                         output: vec![TxOut {
495                                 script_pubkey: self.destination_script.clone(),
496                                 value: 0
497                         }],
498                 };
499
500                 macro_rules! RBF_bump {
501                         ($amount: expr, $old_feerate: expr, $fee_estimator: expr, $predicted_weight: expr) => {
502                                 {
503                                         let mut used_feerate;
504                                         // If the old feerate is lower than the current one returned by the fee estimator, use the new one to compute the fee...
505                                         let new_fee = if $old_feerate < $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) {
506                                                 let mut value = $amount;
507                                                 if subtract_high_prio_fee!(self, $fee_estimator, value, $predicted_weight, used_feerate) {
508                                                         // Overflow check is done in subtract_high_prio_fee
509                                                         $amount - value
510                                                 } else {
511                                                         log_trace!(self, "Can't new-estimation bump new claiming tx, amount {} is too small", $amount);
512                                                         return None;
513                                                 }
514                                         // ...else just bump the previous fee by ~33% (dividing by 750 instead of 1000)
515                                         } else {
516                                                 let fee = $old_feerate * $predicted_weight / 750;
517                                                 if $amount <= fee {
518                                                         log_trace!(self, "Can't 25% bump new claiming tx, amount {} is too small", $amount);
519                                                         return None;
520                                                 }
521                                                 fee
522                                         };
523
524                                         let previous_fee = $old_feerate * $predicted_weight / 1000;
525                                         let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * $predicted_weight / 1000;
526                                         // BIP 125 Opt-in Full Replace-by-Fee Signaling
527                                         //      * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions.
528                                         //      * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting.
529                                         let new_fee = if new_fee < previous_fee + min_relay_fee {
530                                                 previous_fee + min_relay_fee
531                                         } else {
532                                                 new_fee
533                                         };
534                                         Some((new_fee, new_fee * 1000 / $predicted_weight))
535                                 }
536                         }
537                 }
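                // Illustrative RBF_bump! computation with hypothetical values: for $old_feerate = 1_000 sat/kW and
                // $predicted_weight = 1_000, the previous fee was 1_000 sats. If the estimator's HighPriority feerate
                // is not above the old one, the candidate fee is 1_000 * 1_000 / 750 = 1_333 sats; it is then raised
                // to previous_fee + min_relay_fee whenever that sum is larger, so the replacement satisfies BIP 125
                // rules 3 and 4 before the (fee, feerate) pair is returned.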
538
539                 // Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
540                 // didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
541                 let new_timer = Some(Self::get_height_timer(height, cached_claim_datas.soonest_timelock));
542                 let mut inputs_witnesses_weight = 0;
543                 let mut amt = 0;
544                 let mut dynamic_fee = true;
545                 for per_outp_material in cached_claim_datas.per_input_material.values() {
546                         match per_outp_material {
547                                 &InputMaterial::Revoked { ref input_descriptor, ref amount, .. } => {
548                                         inputs_witnesses_weight += Self::get_witnesses_weight(&[*input_descriptor]);
549                                         amt += *amount;
550                                 },
551                                 &InputMaterial::RemoteHTLC { ref preimage, ref htlc, .. } => {
552                                         inputs_witnesses_weight += Self::get_witnesses_weight(if preimage.is_some() { &[InputDescriptors::OfferedHTLC] } else { &[InputDescriptors::ReceivedHTLC] });
553                                         amt += htlc.amount_msat / 1000;
554                                 },
555                                 &InputMaterial::LocalHTLC { .. } => {
556                                         dynamic_fee = false;
557                                 },
558                                 &InputMaterial::Funding { .. } => {
559                                         dynamic_fee = false;
560                                 }
561                         }
562                 }
563                 if dynamic_fee {
564                         let predicted_weight = bumped_tx.get_weight() + inputs_witnesses_weight;
565                         let mut new_feerate;
566                         // If the old feerate is 0, this is the first iteration of this claim, so use the normal fee calculation
567                         if cached_claim_datas.feerate_previous != 0 {
568                                 if let Some((new_fee, feerate)) = RBF_bump!(amt, cached_claim_datas.feerate_previous, fee_estimator, predicted_weight as u64) {
569                                         // If the newly computed fee exceeds the whole claimable amount, burn it all in fees
570                                         if new_fee > amt {
571                                                 bumped_tx.output[0].value = 0;
572                                         } else {
573                                                 bumped_tx.output[0].value = amt - new_fee;
574                                         }
575                                         new_feerate = feerate;
576                                 } else { return None; }
577                         } else {
578                                 if subtract_high_prio_fee!(self, fee_estimator, amt, predicted_weight, new_feerate) {
579                                         bumped_tx.output[0].value = amt;
580                                 } else { return None; }
581                         }
582                         assert!(new_feerate != 0);
583
584                         for (i, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
585                                 match per_outp_material {
586                                         &InputMaterial::Revoked { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc } => {
587                                                 if let Ok(chan_keys) = TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) {
588
589                                                         let witness_script = if let Some(ref htlc) = *htlc {
590                                                                 chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.a_htlc_key, &chan_keys.b_htlc_key, &chan_keys.revocation_key)
591                                                         } else {
592                                                                 chan_utils::get_revokeable_redeemscript(&chan_keys.revocation_key, self.remote_csv, &chan_keys.a_delayed_payment_key)
593                                                         };
594
595                                                         if let Ok(sig) = self.key_storage.sign_justice_transaction(&bumped_tx, i, &witness_script, *amount, &per_commitment_key, &chan_keys.revocation_key, htlc.is_some(),  &self.secp_ctx) {
596                                                                 bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
597                                                                 bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
598                                                                 if htlc.is_some() {
599                                                                         bumped_tx.input[i].witness.push(chan_keys.revocation_key.clone().serialize().to_vec());
600                                                                 } else {
601                                                                         bumped_tx.input[i].witness.push(vec!(1));
602                                                                 }
603                                                                 bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
604                                                         } else { return None; }
605                                                         //TODO: panic ?
606
607                                                         log_trace!(self, "Going to broadcast Penalty Transaction {} claiming revoked {} output {} from {} with new feerate {}...", bumped_tx.txid(), if *input_descriptor == InputDescriptors::RevokedOutput { "to_local" } else if *input_descriptor == InputDescriptors::RevokedOfferedHTLC { "offered" } else if *input_descriptor == InputDescriptors::RevokedReceivedHTLC { "received" } else { "" }, outp.vout, outp.txid, new_feerate);
608                                                 }
609                                         },
610                                         &InputMaterial::RemoteHTLC { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref preimage, ref htlc } => {
611                                                 if let Ok(chan_keys) = TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) {
612                                                         let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.a_htlc_key, &chan_keys.b_htlc_key, &chan_keys.revocation_key);
613
614                                                         if !preimage.is_some() { bumped_tx.lock_time = htlc.cltv_expiry }; // Right now we don't aggregate time-locked transactions; if we do, we should set lock_time before signing to avoid breaking the sighash computation
615                                                         if let Ok(sig) = self.key_storage.sign_remote_htlc_transaction(&bumped_tx, i, &witness_script, htlc.amount_msat / 1000, &per_commitment_point, preimage, &self.secp_ctx) {
616                                                                 bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
617                                                                 bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
618                                                                 if let &Some(preimage) = preimage {
619                                                                         bumped_tx.input[i].witness.push(preimage.0.to_vec());
620                                                                 } else {
621                                                                         // Due to BIP146 (MINIMALIF) this must be a zero-length element to relay.
622                                                                         bumped_tx.input[i].witness.push(vec![]);
623                                                                 }
624                                                                 bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
625                                                         }
626                                                         log_trace!(self, "Going to broadcast Claim Transaction {} claiming remote {} htlc output {} from {} with new feerate {}...", bumped_tx.txid(), if preimage.is_some() { "offered" } else { "received" }, outp.vout, outp.txid, new_feerate);
627                                                 }
628                                         },
629                                         _ => unreachable!()
630                                 }
631                         }
632                         log_trace!(self, "...with timer {}", new_timer.unwrap());
633                         assert!(predicted_weight >= bumped_tx.get_weight());
634                         return Some((new_timer, new_feerate, bumped_tx))
635                 } else {
636                         for (_, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
637                                 match per_outp_material {
638                                         &InputMaterial::LocalHTLC { ref preimage, ref amount } => {
639                                                 let htlc_tx = self.get_fully_signed_htlc_tx(outp, preimage);
640                                                 if let Some(htlc_tx) = htlc_tx {
641                                                         let feerate = (amount - htlc_tx.output[0].value) * 1000 / htlc_tx.get_weight() as u64;
642                                                         // Timer set to $NEVER given we can't bump tx without anchor outputs
643                                                         log_trace!(self, "Going to broadcast Local HTLC-{} claiming HTLC output {} from {}...", if preimage.is_some() { "Success" } else { "Timeout" }, outp.vout, outp.txid);
644                                                         return Some((None, feerate, htlc_tx));
645                                                 }
646                                                 return None;
647                                         },
648                                         &InputMaterial::Funding { ref funding_redeemscript } => {
649                                                 let signed_tx = self.get_fully_signed_local_tx(funding_redeemscript).unwrap();
650                                                 // Timer set to $NEVER given we can't bump tx without anchor outputs
651                                                 log_trace!(self, "Going to broadcast Local Transaction {} claiming funding output {} from {}...", signed_tx.txid(), outp.vout, outp.txid);
652                                                 return Some((None, self.local_commitment.as_ref().unwrap().feerate_per_kw, signed_tx));
653                                         }
654                                         _ => unreachable!()
655                                 }
656                         }
657                 }
658                 None
659         }
660
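        /// Called on each block connection. In order: (1) new claim requests are aggregated where it is sound to do
        /// so and a first claim tx is generated and broadcast for each, (2) the matched transactions are scanned to
        /// detect which tracked outpoints were spent (by us or by a counterparty), (3) events which have reached
        /// ANTI_REORG_DELAY are finalized and their tracking entries removed, and (4) pending requests whose height
        /// timer expired are fee-bumped and rebroadcast.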
661         pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, height: u32, broadcaster: B, fee_estimator: F)
662                 where B::Target: BroadcasterInterface,
663                       F::Target: FeeEstimator
664         {
665                 log_trace!(self, "Block at height {} connected with {} claim requests", height, claimable_outpoints.len());
666                 let mut new_claims = Vec::new();
667                 let mut aggregated_claim = HashMap::new();
668                 let mut aggregated_soonest = ::std::u32::MAX;
669
670                 // Try to aggregate outputs if their timelock expiration isn't imminent (i.e. absolute_timelock
671                 // > height + CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
672                 for req in claimable_outpoints {
673                         // Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock a CPFP input for a while
674                         if let Some(_) = self.claimable_outpoints.get(&req.outpoint) { log_trace!(self, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoint.txid, req.outpoint.vout); } else {
675                                 log_trace!(self, "Test if outpoint can be aggregated with expiration {} against {}", req.absolute_timelock, height + CLTV_SHARED_CLAIM_BUFFER);
676                                 if req.absolute_timelock <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable { // Don't aggregate if outpoint absolute timelock is soon or marked as non-aggregable
677                                         let mut single_input = HashMap::new();
678                                         single_input.insert(req.outpoint, req.witness_data);
679                                         new_claims.push((req.absolute_timelock, single_input));
680                                 } else {
681                                         aggregated_claim.insert(req.outpoint, req.witness_data);
682                                         if req.absolute_timelock < aggregated_soonest {
683                                                 aggregated_soonest = req.absolute_timelock;
684                                         }
685                                 }
686                         }
687                 }
688                 new_claims.push((aggregated_soonest, aggregated_claim));
689
690                 // Generate claim transactions and track them to bump if necessary at
691                 // height timer expiration (i.e in how many blocks we're going to take action).
692                 for (soonest_timelock, claim) in new_claims.drain(..) {
693                         let mut claim_material = ClaimTxBumpMaterial { height_timer: None, feerate_previous: 0, soonest_timelock, per_input_material: claim };
694                         if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator) {
695                                 claim_material.height_timer = new_timer;
696                                 claim_material.feerate_previous = new_feerate;
697                                 let txid = tx.txid();
698                                 for k in claim_material.per_input_material.keys() {
699                                         log_trace!(self, "Registering claiming request for {}:{}", k.txid, k.vout);
700                                         self.claimable_outpoints.insert(k.clone(), (txid, height));
701                                 }
702                                 self.pending_claim_requests.insert(txid, claim_material);
703                                 log_trace!(self, "Broadcast onchain {}", log_tx!(tx));
704                                 broadcaster.broadcast_transaction(&tx);
705                         }
706                 }
707
708                 let mut bump_candidates = HashMap::new();
709                 for tx in txn_matched {
710                         // Scan all inputs to check whether one of the spent outpoints is of interest to us
711                         let mut claimed_outputs_material = Vec::new();
712                         for inp in &tx.input {
713                                 if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
714                                         // If outpoint has claim request pending on it...
715                                         if let Some(claim_material) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
716                                                 //... we need to verify equality between transaction outpoints and claim request
717                                                 // outpoints to know if transaction is the original claim or a bumped one issued
718                                                 // by us.
719                                                 let mut set_equality = true;
720                                                 if claim_material.per_input_material.len() != tx.input.len() {
721                                                         set_equality = false;
722                                                 } else {
723                                                         for (claim_inp, tx_inp) in claim_material.per_input_material.keys().zip(tx.input.iter()) {
724                                                                 if *claim_inp != tx_inp.previous_output {
725                                                                         set_equality = false;
726                                                                 }
727                                                         }
728                                                 }
729
730                                                 macro_rules! clean_claim_request_after_safety_delay {
731                                                         () => {
732                                                                 let new_event = OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() };
733                                                                 match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
734                                                                         hash_map::Entry::Occupied(mut entry) => {
735                                                                                 if !entry.get().contains(&new_event) {
736                                                                                         entry.get_mut().push(new_event);
737                                                                                 }
738                                                                         },
739                                                                         hash_map::Entry::Vacant(entry) => {
740                                                                                 entry.insert(vec![new_event]);
741                                                                         }
742                                                                 }
743                                                         }
744                                                 }
745
746                                                 // If this is our transaction (or our counterparty spent all the outputs
747                                                 // before we could anyway, with the same input order as us), wait for
748                                                 // ANTI_REORG_DELAY and clean the RBF tracking map.
749                                                 if set_equality {
750                                                         clean_claim_request_after_safety_delay!();
751                                                 } else { // If not, generate a new claim request with the updated outpoint set
752                                                         let mut at_least_one_drop = false;
753                                                         for input in tx.input.iter() {
754                                                                 if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
755                                                                         claimed_outputs_material.push((input.previous_output, input_material));
756                                                                         at_least_one_drop = true;
757                                                                 }
758                                                                 // If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
759                                                                 if claim_material.per_input_material.is_empty() {
760                                                                         clean_claim_request_after_safety_delay!();
761                                                                 }
762                                                         }
763                                                         //TODO: recompute soonest_timelock to avoid wasting a bit on fees
764                                                         if at_least_one_drop {
765                                                                 bump_candidates.insert(first_claim_txid_height.0.clone(), claim_material.clone());
766                                                         }
767                                                 }
768                                                 break; // No need to iterate further, the tx is either ours or theirs
769                                         } else {
770                                                 panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
771                                         }
772                                 }
773                         }
774                         for (outpoint, input_material) in claimed_outputs_material.drain(..) {
775                                 let new_event = OnchainEvent::ContentiousOutpoint { outpoint, input_material };
776                                 match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
777                                         hash_map::Entry::Occupied(mut entry) => {
778                                                 if !entry.get().contains(&new_event) {
779                                                         entry.get_mut().push(new_event);
780                                                 }
781                                         },
782                                         hash_map::Entry::Vacant(entry) => {
783                                                 entry.insert(vec![new_event]);
784                                         }
785                                 }
786                         }
787                 }
788
789                 // After the security delay, either our claim tx got enough confs or the outpoint is definitely out of reach
790                 if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) {
791                         for ev in events {
792                                 match ev {
793                                         OnchainEvent::Claim { claim_request } => {
794                                                 // We may remove a whole set of claim outpoints here, as they may have
795                                                 // been aggregated in a single tx and thus claimed atomically
796                                                 if let Some(bump_material) = self.pending_claim_requests.remove(&claim_request) {
797                                                         for outpoint in bump_material.per_input_material.keys() {
798                                                                 self.claimable_outpoints.remove(&outpoint);
799                                                         }
800                                                 }
801                                         },
802                                         OnchainEvent::ContentiousOutpoint { outpoint, .. } => {
803                                                 self.claimable_outpoints.remove(&outpoint);
804                                         }
805                                 }
806                         }
807                 }
808
809                 // Check if any pending claim request must be rescheduled
810                 for (first_claim_txid, ref claim_data) in self.pending_claim_requests.iter() {
811                         if let Some(h) = claim_data.height_timer {
812                                 if h == height {
813                                         bump_candidates.insert(*first_claim_txid, (*claim_data).clone());
814                                 }
815                         }
816                 }
817
818                 // Build, bump and rebroadcast tx accordingly
819                 log_trace!(self, "Bumping {} candidates", bump_candidates.len());
820                 for (first_claim_txid, claim_material) in bump_candidates.iter() {
821                         if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator) {
822                                 log_trace!(self, "Broadcast onchain {}", log_tx!(bump_tx));
823                                 broadcaster.broadcast_transaction(&bump_tx);
824                                 if let Some(claim_material) = self.pending_claim_requests.get_mut(first_claim_txid) {
825                                         claim_material.height_timer = new_timer;
826                                         claim_material.feerate_previous = new_feerate;
827                                 }
828                         }
829                 }
830         }
831
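            // On a block disconnection, resurrect any ContentiousOutpoint whose ANTI_REORG_DELAY maturity
            // was scheduled in the disconnected block back into its originating claim request, rebroadcast
            // a bumped claim tx for it, and erase any claim request generated at the disconnected height.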
832         pub(super) fn block_disconnected<B: Deref, F: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F)
833                 where B::Target: BroadcasterInterface,
834                       F::Target: FeeEstimator
835         {
836                 let mut bump_candidates = HashMap::new();
837                 if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
838                         //- our claim tx on a commitment tx output
839                         //- resurrect the outpoint back into its claimable set and regenerate the claim tx
840                         for ev in events {
841                                 match ev {
842                                         OnchainEvent::ContentiousOutpoint { outpoint, input_material } => {
843                                                 if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&outpoint) {
844                                                         if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
845                                                                 claim_material.per_input_material.insert(outpoint, input_material);
846                                                                 // Using a HashMap guarantees us that if we have multiple outpoints getting
847                                                                 // resurrected, only one bump claim tx is going to be broadcast
848                                                                 bump_candidates.insert(ancestor_claimable_txid.clone(), claim_material.clone());
849                                                         }
850                                                 }
851                                         },
852                                         _ => {},
853                                 }
854                         }
855                 }
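                    // Regenerate and rebroadcast a bumped claim tx for each resurrected request, then write the
                    // refreshed timer and feerate back into pending_claim_requests below.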
856                 for (_, claim_material) in bump_candidates.iter_mut() {
857                         if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator) {
858                                 claim_material.height_timer = new_timer;
859                                 claim_material.feerate_previous = new_feerate;
860                                 broadcaster.broadcast_transaction(&bump_tx);
861                         }
862                 }
863                 for (ancestor_claim_txid, claim_material) in bump_candidates.drain() {
864                         self.pending_claim_requests.insert(ancestor_claim_txid.0, claim_material);
865                 }
866                 //TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
867                 // right now if one of the outpoints gets disconnected, just erase the whole pending claim request.
868                 let mut remove_request = Vec::new();
869                 self.claimable_outpoints.retain(|_, ref v|
870                         if v.1 == height {
871                                 remove_request.push(v.0.clone());
872                                 false
873                         } else { true });
874                 for req in remove_request {
875                         self.pending_claim_requests.remove(&req);
876                 }
877         }
878
879         pub(super) fn provide_latest_local_tx(&mut self, tx: LocalCommitmentTransaction) -> Result<(), ()> {
880                 // To prevent any unsafe state discrepancy between offchain and onchain, once the local
881                 // commitment transaction has been signed due to an event (either block height for an
882                 // HTLC-timeout or a channel force-closure), don't allow any further update of the local
883                 // commitment transaction view, to avoid delivering the revocation secret to the counterparty
884                 // for the aforementioned signed transaction.
885                 if self.local_htlc_sigs.is_some() || self.prev_local_htlc_sigs.is_some() {
886                         return Err(());
887                 }
888                 self.prev_local_commitment = self.local_commitment.take();
889                 self.local_commitment = Some(tx);
890                 Ok(())
891         }
892
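            // Sign the HTLC transactions spending the current local commitment tx and cache the signatures
            // in local_htlc_sigs, indexed by each HTLC's transaction output index.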
893         fn sign_latest_local_htlcs(&mut self) {
894                 if let Some(ref local_commitment) = self.local_commitment {
895                         if let Ok(sigs) = self.key_storage.sign_local_commitment_htlc_transactions(local_commitment, self.local_csv, &self.secp_ctx) {
896                                 self.local_htlc_sigs = Some(Vec::new());
897                                 let ret = self.local_htlc_sigs.as_mut().unwrap();
898                                 for (htlc_idx, (local_sig, &(ref htlc, _))) in sigs.iter().zip(local_commitment.per_htlc.iter()).enumerate() {
899                                         if let Some(tx_idx) = htlc.transaction_output_index {
900                                                 if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
901                                                 ret[tx_idx as usize] = Some((htlc_idx, local_sig.expect("Did not receive a signature for a non-dust HTLC")));
902                                         } else {
903                                                 assert!(local_sig.is_none(), "Received a signature for a dust HTLC");
904                                         }
905                                 }
906                         }
907                 }
908         }
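            // Same as sign_latest_local_htlcs, but signing against the previous local commitment tx and
            // caching the signatures in prev_local_htlc_sigs.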
909         fn sign_prev_local_htlcs(&mut self) {
910                 if let Some(ref local_commitment) = self.prev_local_commitment {
911                         if let Ok(sigs) = self.key_storage.sign_local_commitment_htlc_transactions(local_commitment, self.local_csv, &self.secp_ctx) {
912                                 self.prev_local_htlc_sigs = Some(Vec::new());
913                                 let ret = self.prev_local_htlc_sigs.as_mut().unwrap();
914                                 for (htlc_idx, (local_sig, &(ref htlc, _))) in sigs.iter().zip(local_commitment.per_htlc.iter()).enumerate() {
915                                         if let Some(tx_idx) = htlc.transaction_output_index {
916                                                 if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
917                                                 ret[tx_idx as usize] = Some((htlc_idx, local_sig.expect("Did not receive a signature for a non-dust HTLC")));
918                                         } else {
919                                                 assert!(local_sig.is_none(), "Received a signature for a dust HTLC");
920                                         }
921                                 }
922                         }
923                 }
924         }
925
926         //TODO: getting the latest local transactions should be infallible and result in us "force-closing the channel", but we may
927         // have an empty local commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
928         // before providing an initial commitment transaction. For an outbound channel, initialize the ChannelMonitor at Channel::funding_signed;
929         // there is nothing to monitor before that.
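            // Sign the cached local commitment transaction via the key storage and return it fully signed,
            // ready for broadcast; returns None if no local commitment tx is available or signing fails.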
930         pub(super) fn get_fully_signed_local_tx(&mut self, funding_redeemscript: &Script) -> Option<Transaction> {
931                 if let Some(ref mut local_commitment) = self.local_commitment {
932                         match self.key_storage.sign_local_commitment(local_commitment, &self.secp_ctx) {
933                                 Ok(sig) => Some(local_commitment.add_local_sig(funding_redeemscript, sig)),
934                                 Err(_) => return None,
935                         }
936                 } else {
937                         None
938                 }
939         }
940
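            // Test-only: sign a clone of the cached local commitment tx so the cached copy itself is left untouched.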
941         #[cfg(test)]
942         pub(super) fn get_fully_signed_copy_local_tx(&mut self, funding_redeemscript: &Script) -> Option<Transaction> {
943                 if let Some(ref mut local_commitment) = self.local_commitment {
944                         let local_commitment = local_commitment.clone();
945                         match self.key_storage.sign_local_commitment(&local_commitment, &self.secp_ctx) {
946                                 Ok(sig) => Some(local_commitment.add_local_sig(funding_redeemscript, sig)),
947                                 Err(_) => return None,
948                         }
949                 } else {
950                         None
951                 }
952         }
953
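            // Build a fully signed HTLC transaction spending the given outpoint of either the current or the
            // previous local commitment tx, generating and caching the HTLC signatures on first use.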
954         pub(super) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
955                 let mut htlc_tx = None;
956                 if self.local_commitment.is_some() {
957                         let commitment_txid = self.local_commitment.as_ref().unwrap().txid();
958                         if commitment_txid == outp.txid {
959                                 self.sign_latest_local_htlcs();
960                                 if let &Some(ref htlc_sigs) = &self.local_htlc_sigs {
961                                         let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
962                                         htlc_tx = Some(self.local_commitment.as_ref().unwrap()
963                                                 .get_signed_htlc_tx(*htlc_idx, htlc_sig, preimage, self.local_csv));
964                                 }
965                         }
966                 }
967                 if self.prev_local_commitment.is_some() {
968                         let commitment_txid = self.prev_local_commitment.as_ref().unwrap().txid();
969                         if commitment_txid == outp.txid {
970                                 self.sign_prev_local_htlcs();
971                                 if let &Some(ref htlc_sigs) = &self.prev_local_htlc_sigs {
972                                         let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
973                                         htlc_tx = Some(self.prev_local_commitment.as_ref().unwrap()
974                                                 .get_signed_htlc_tx(*htlc_idx, htlc_sig, preimage, self.local_csv));
975                                 }
976                         }
977                 }
978                 htlc_tx
979         }
980
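            // Test-only: like get_fully_signed_htlc_tx, but drops any HTLC signatures it had to generate so
            // the call does not lock in the local commitment state (see provide_latest_local_tx).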
981         #[cfg(test)]
982         pub(super) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
983                 let latest_had_sigs = self.local_htlc_sigs.is_some();
984                 let prev_had_sigs = self.prev_local_htlc_sigs.is_some();
985                 let ret = self.get_fully_signed_htlc_tx(outp, preimage);
986                 if !latest_had_sigs {
987                         self.local_htlc_sigs = None;
988                 }
989                 if !prev_had_sigs {
990                         self.prev_local_htlc_sigs = None;
991                 }
992                 ret
993         }
994 }