Handle `transaction_unconfirmed` as a full reorg to the tx height
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index f5aa47d18d76c722d221c90ca6782a5cf1cf51dc..fc4caef8afd90f6fa68a2d0e43238732d4c04e6c 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -24,7 +24,7 @@
 //! servicing [`ChannelMonitor`] updates from the client.
 
 use bitcoin::blockdata::block::BlockHeader;
-use bitcoin::hash_types::Txid;
+use bitcoin::hash_types::{Txid, BlockHash};
 
 use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
@@ -395,6 +395,23 @@ where C::Target: chain::Filter,
                self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
        }
 
+       #[cfg(not(c_bindings))]
+       /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
+       pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<MonitorUpdateId>> {
+               self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
+                       (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
+               }).collect()
+       }
+
+       #[cfg(c_bindings)]
+       /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
+       pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<MonitorUpdateId>)> {
+               self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
+                       (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
+               }).collect()
+       }
+
+
        #[cfg(test)]
        pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
                self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
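
Not part of the diff, but as a usage sketch: a caller can poll the new accessor to see which channels still have monitor persistence in flight. The helper below is hypothetical and assumes a `std` build, where the non-`c_bindings` variant returns a `HashMap`:

```rust
use std::collections::HashMap;

use lightning::chain::chainmonitor::MonitorUpdateId;
use lightning::chain::transaction::OutPoint;

/// Hypothetical helper: given the map returned by
/// `ChainMonitor::list_pending_monitor_updates()` (non-`c_bindings` build),
/// return the funding outpoints whose persistence is still in flight.
fn channels_with_pending_updates(
	pending: &HashMap<OutPoint, Vec<MonitorUpdateId>>,
) -> Vec<OutPoint> {
	pending.iter()
		.filter(|(_, updates)| !updates.is_empty())
		.map(|(outpoint, _)| *outpoint)
		.collect()
}
```

An outpoint's entry empties out once every `MonitorUpdateId` handed to the `Persist` implementation has been passed back through `channel_monitor_updated`, which is what the updated test further down exercises.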
@@ -544,7 +561,7 @@ where
                });
        }
 
-       fn get_relevant_txids(&self) -> Vec<Txid> {
+       fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
                let mut txids = Vec::new();
                let monitor_states = self.monitors.read().unwrap();
                for monitor_state in monitor_states.values() {
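
The added `Option<BlockHash>` tells `Confirm` users which block, if any, confirmed each relevant transaction, so a sync client can notice that the block was reorged out and call `transaction_unconfirmed`, which this change now treats as a reorg to the transaction's height. A rough sketch of such a client loop follows; `is_in_best_chain` is a hypothetical stand-in for whatever block source the client actually uses:

```rust
use bitcoin::hash_types::BlockHash;
use lightning::chain::Confirm;

/// Sketch only: for every relevant txid whose confirming block is known but is
/// no longer in the best chain, tell the `Confirm` target the transaction is
/// unconfirmed. Per this change, that is handled as a reorg to the tx height.
fn unconfirm_reorged_txs<C: Confirm>(
	confirm: &C,
	is_in_best_chain: impl Fn(&BlockHash) -> bool,
) {
	for (txid, confirming_block) in confirm.get_relevant_txids() {
		if let Some(block_hash) = confirming_block {
			if !is_in_best_chain(&block_hash) {
				confirm.transaction_unconfirmed(&txid);
			}
		}
		// Txids with no known confirming block are left alone here.
	}
}
```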
@@ -757,7 +774,7 @@ mod tests {
        use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
        use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
        use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
-       use crate::ln::channelmanager::{self, PaymentSendFailure};
+       use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::ChannelMessageHandler;
        use crate::util::errors::APIError;
@@ -798,7 +815,22 @@ mod tests {
                // Note that updates is a HashMap so the ordering here is actually random. This shouldn't
                // fail either way but if it fails intermittently it's depending on the ordering of updates.
                let mut update_iter = updates.iter();
-               nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
+               let next_update = update_iter.next().unwrap().clone();
+               // Should contain next_update when pending updates listed.
+               #[cfg(not(c_bindings))]
+               assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
+                       .unwrap().contains(&next_update));
+               #[cfg(c_bindings)]
+               assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
+                       .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
+               nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap();
+               // Should not contain the previously pending next_update when pending updates listed.
+               #[cfg(not(c_bindings))]
+               assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
+                       .unwrap().contains(&next_update));
+               #[cfg(c_bindings)]
+               assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
+                       .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
                assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
                nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
@@ -883,7 +915,7 @@ mod tests {
                // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
                // the update through to the ChannelMonitor which will refuse it (as the channel is closed).
                chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
-               unwrap_send_err!(nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)),
+               unwrap_send_err!(nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), PaymentId(second_payment_hash.0)),
                        true, APIError::ChannelUnavailable { ref err },
                        assert!(err.contains("ChannelMonitor storage failure")));
                check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
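
For completeness, the `send_payment` change in the last hunk reflects the API now taking an explicit `PaymentId`; the test simply reuses the payment hash bytes. A minimal sketch of that derivation, under the assumption that the hash is unique per payment:

```rust
use lightning::ln::channelmanager::PaymentId;
use lightning::ln::PaymentHash;

/// Sketch: derive a `PaymentId` from the payment hash, as the updated test
/// does with `PaymentId(second_payment_hash.0)`. Any 32-byte value that is
/// unique per payment works here.
fn payment_id_from_hash(payment_hash: &PaymentHash) -> PaymentId {
	PaymentId(payment_hash.0)
}
```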