Rename ChannelMonitor::write_for_disk --> serialize_for_disk
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 93a4edb6e3c4cf4499afda966688db17c07dad69..889dfa211ee78d167dbad2229ffcaf7dc82d1d4c 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -95,7 +95,7 @@ impl Readable for ChannelMonitorUpdate {
 }
 
 /// An error enum representing a failure to persist a channel monitor update.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub enum ChannelMonitorUpdateErr {
        /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
        /// our state failed, but is expected to succeed at some point in the future).
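With `Debug` now derived, downstream persisters can log or format the error directly. A minimal sketch, assuming the crate's public module path; the function name is illustrative:

```rust
use lightning::chain::channelmonitor::ChannelMonitorUpdateErr;

// Sketch: `{:?}` formatting compiles only because of the new Debug derive.
fn report_persist_failure(res: Result<(), ChannelMonitorUpdateErr>) {
    if let Err(e) = res {
        eprintln!("monitor persistence failed: {:?}", e);
    }
}
```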
@@ -159,7 +159,7 @@ pub enum ChannelMonitorUpdateErr {
 /// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this
 /// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was
 /// corrupted.
-/// Contains a human-readable error message.
+/// Contains a developer-readable error message.
 #[derive(Debug)]
 pub struct MonitorUpdateError(pub &'static str);
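Because the message is exposed as a `pub &'static str`, call sites can surface it without an accessor; a small illustrative sketch (the function name is hypothetical):

```rust
use lightning::chain::channelmonitor::MonitorUpdateError;

// Sketch: the tuple field carries the developer-readable message directly.
fn log_monitor_update_error(err: &MonitorUpdateError) {
    eprintln!("monitor update rejected: {}", err.0);
}
```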
 
@@ -746,7 +746,7 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
        /// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
        /// returned block hash and the current chain and then reconnecting blocks to get to the
        /// best chain) upon deserializing the object!
-       pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
+       pub fn serialize_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
                //TODO: We still write out all the serialization here manually instead of using the fancy
                //serialization framework we have, we should migrate things over to it.
                writer.write_all(&[SERIALIZATION_VERSION; 1])?;
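For callers the renamed method is driven exactly as before. A hedged sketch with a trivial in-memory `Writer` sink; the `VecWriter` helper is an assumption for illustration, as is the `size_hint` body (the trait documents that hints may be ignored):

```rust
use lightning::util::ser::Writer;

// Illustrative in-memory sink; any `Writer` (e.g. one wrapping a file) works.
struct VecWriter(Vec<u8>);

impl Writer for VecWriter {
    fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
        self.0.extend_from_slice(buf);
        Ok(())
    }
    // Hints are optional; pre-reserving is a cheap way to honor them.
    fn size_hint(&mut self, size: usize) {
        self.0.reserve_exact(size);
    }
}

// let mut w = VecWriter(Vec::new());
// monitor.serialize_for_disk(&mut w)?;
// std::fs::write("monitor.bin", &w.0)?;
```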
@@ -1162,28 +1162,28 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// itself.
        ///
        /// panics if the given update is not the next update by update_id.
-       pub fn update_monitor<B: Deref, L: Deref>(&mut self, mut updates: ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError>
+       pub fn update_monitor<B: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError>
                where B::Target: BroadcasterInterface,
                                        L::Target: Logger,
        {
                if self.latest_update_id + 1 != updates.update_id {
                        panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
                }
-               for update in updates.updates.drain(..) {
+               for update in updates.updates.iter() {
                        match update {
                                ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
                                        if self.lockdown_from_offchain { panic!(); }
-                                       self.provide_latest_holder_commitment_tx_info(commitment_tx, htlc_outputs)?
+                                       self.provide_latest_holder_commitment_tx_info(commitment_tx.clone(), htlc_outputs.clone())?
                                },
                                ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
-                                       self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point, logger),
+                                       self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs.clone(), *commitment_number, *their_revocation_point, logger),
                                ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
                                        self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
                                ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
-                                       self.provide_secret(idx, secret)?,
+                                       self.provide_secret(*idx, *secret)?,
                                ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
                                        self.lockdown_from_offchain = true;
-                                       if should_broadcast {
+                                       if *should_broadcast {
                                                self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
                                        } else {
                                                log_error!(logger, "You have a toxic holder commitment transaction available in channel monitor, read comment in ChannelMonitor::get_latest_holder_commitment_txn to be informed of manual action to take");
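The signature change from `mut updates: ChannelMonitorUpdate` (consumed) to `updates: &ChannelMonitorUpdate` (borrowed) lets a caller apply an update and still hand the same value to a persister afterwards. A hedged sketch of that call-site pattern; the function and its error mapping are illustrative, and the module paths assume this era's layout:

```rust
use std::ops::Deref;

use lightning::chain::chaininterface::BroadcasterInterface;
use lightning::chain::channelmonitor::{
    ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, Persist,
};
use lightning::chain::keysinterface::ChannelKeys;
use lightning::chain::transaction::OutPoint;
use lightning::util::logger::Logger;

// Hypothetical call site: apply in memory, then persist the same update.
fn apply_then_persist<ChanSigner: ChannelKeys, B: Deref, L: Deref, P: Persist<ChanSigner>>(
    id: OutPoint,
    monitor: &mut ChannelMonitor<ChanSigner>,
    update: &ChannelMonitorUpdate,
    persister: &P,
    broadcaster: &B,
    logger: &L,
) -> Result<(), ChannelMonitorUpdateErr>
where
    B::Target: BroadcasterInterface,
    L::Target: Logger,
{
    // `update` is only borrowed here, so it survives the call...
    monitor
        .update_monitor(update, broadcaster, logger)
        .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)?;
    // ...and can be forwarded to the persister without a clone.
    persister.update_persisted_channel(id, update, monitor)
}
```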
@@ -2141,10 +2141,10 @@ pub trait Persist<Keys: ChannelKeys>: Send + Sync {
        /// stored channel data). Note that you **must** persist every new monitor to
        /// disk. See the `Persist` trait documentation for more details.
        ///
-       /// See [`ChannelMonitor::write_for_disk`] for writing out a `ChannelMonitor`,
+       /// See [`ChannelMonitor::serialize_for_disk`] for writing out a `ChannelMonitor`,
        /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
        ///
-       /// [`ChannelMonitor::write_for_disk`]: struct.ChannelMonitor.html#method.write_for_disk
+       /// [`ChannelMonitor::serialize_for_disk`]: struct.ChannelMonitor.html#method.serialize_for_disk
        /// [`ChannelMonitorUpdateErr`]: enum.ChannelMonitorUpdateErr.html
        fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr>;
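For illustration, a hedged sketch of a `Persist` implementor built on the renamed method, reusing the `VecWriter` helper sketched earlier; the in-memory `store` stands in for real disk I/O and the error mapping is an assumption, not guidance from the diff:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

use lightning::chain::channelmonitor::{
    ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, Persist,
};
use lightning::chain::keysinterface::ChannelKeys;
use lightning::chain::transaction::OutPoint;
use lightning::util::ser::Writeable;

// Illustrative persister: keyed by funding outpoint, backed by a map.
struct MemoryPersister {
    store: Mutex<HashMap<OutPoint, Vec<u8>>>,
}

impl<Keys: ChannelKeys + Writeable> Persist<Keys> for MemoryPersister {
    fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<Keys>)
        -> Result<(), ChannelMonitorUpdateErr>
    {
        // Serialize the full monitor with the renamed method.
        let mut w = VecWriter(Vec::new());
        data.serialize_for_disk(&mut w)
            .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)?;
        self.store.lock().unwrap().insert(id, w.0);
        Ok(())
    }

    fn update_persisted_channel(&self, id: OutPoint, _update: &ChannelMonitorUpdate,
        data: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr>
    {
        // Simplest correct strategy: ignore the delta and rewrite the whole
        // monitor. See the next hunk for the O(1) delta alternative.
        self.persist_new_channel(id, data)
    }
}
```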
 
@@ -2167,12 +2167,12 @@ pub trait Persist<Keys: ChannelKeys>: Send + Sync {
        /// them in batches. The size of each monitor grows `O(number of state updates)`
        /// whereas updates are small and `O(1)`.
        ///
-       /// See [`ChannelMonitor::write_for_disk`] for writing out a `ChannelMonitor`,
+       /// See [`ChannelMonitor::serialize_for_disk`] for writing out a `ChannelMonitor`,
        /// [`ChannelMonitorUpdate::write`] for writing out an update, and
        /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
        ///
        /// [`ChannelMonitor::update_monitor`]: struct.ChannelMonitor.html#impl-1
-       /// [`ChannelMonitor::write_for_disk`]: struct.ChannelMonitor.html#method.write_for_disk
+       /// [`ChannelMonitor::serialize_for_disk`]: struct.ChannelMonitor.html#method.serialize_for_disk
        /// [`ChannelMonitorUpdate::write`]: struct.ChannelMonitorUpdate.html#method.write
        /// [`ChannelMonitorUpdateErr`]: enum.ChannelMonitorUpdateErr.html
        fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr>;
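Where an implementation does want the O(1) path described above, the counterpart is `ChannelMonitorUpdate::write`. A hedged sketch of journaling deltas instead of rewriting the monitor; the journal structure is hypothetical and reuses the `VecWriter` helper sketched earlier:

```rust
use lightning::chain::channelmonitor::{ChannelMonitorUpdate, ChannelMonitorUpdateErr};
use lightning::util::ser::Writeable;

// Sketch: append the small serialized update to a per-channel journal and
// only rewrite the full monitor occasionally (compaction).
fn append_update_delta(journal: &mut Vec<Vec<u8>>, update: &ChannelMonitorUpdate)
    -> Result<(), ChannelMonitorUpdateErr>
{
    let mut w = VecWriter(Vec::new());
    update.write(&mut w)
        .map_err(|_| ChannelMonitorUpdateErr::TemporaryFailure)?;
    journal.push(w.0);
    Ok(())
}
```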