Merge pull request #20 from TheBlueMatt/master
authorMatt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Tue, 17 Apr 2018 02:03:35 +0000 (22:03 -0400)
committerGitHub <noreply@github.com>
Tue, 17 Apr 2018 02:03:35 +0000 (22:03 -0400)
Rewrite parts of channel, cleanup a bunch of stuff

14 files changed:
Cargo.toml
build.rs [new file with mode: 0644]
fuzz/Cargo.toml
fuzz/fuzz_targets/channel_target.rs
fuzz/src/util/rust_crypto_nonstd_arch.c [new symlink]
src/ln/chan_utils.rs
src/ln/channel.rs
src/ln/channelmanager.rs
src/ln/channelmonitor.rs
src/ln/msgs.rs
src/ln/peer_handler.rs
src/util/events.rs
src/util/rust_crypto_nonstd_arch.c [new file with mode: 0644]
src/util/test_utils.rs

index 8e1946b1ec8e2abfdb2be5c8bd14b6e90b09bb76..f097d7fbc65d46ab62643254d720b42a97a08bf2 100644 (file)
@@ -8,6 +8,7 @@ description = """
 A Bitcoin Lightning implementation in Rust.
 Still super-early code-dump quality and is missing large chunks. See README in git repo for suggested projects if you want to contribute. Don't have to bother telling you not to use this for anything serious, because you'd have to finish building it to even try.
 """
+build = "build.rs"
 
 [features]
 # Supports tracking channels with a non-bitcoin chain hashes. Currently enables all kinds of fun DoS attacks.
@@ -15,7 +16,14 @@ non_bitcoin_chain_hash_routing = []
 fuzztarget = ["secp256k1/fuzztarget", "bitcoin/fuzztarget"]
 
 [dependencies]
-bitcoin = { git = "https://github.com/rust-bitcoin/rust-bitcoin" }
+bitcoin = "0.13"
 rust-crypto = "0.2"
 rand = "0.4"
 secp256k1 = "0.9"
+
+[build-dependencies]
+gcc = "0.3"
+
+[dev-dependencies.bitcoin]
+version = "0.13"
+features = ["bitcoinconsensus"]
diff --git a/build.rs b/build.rs
new file mode 100644 (file)
index 0000000..7dd3403
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,10 @@
+extern crate gcc;
+
+fn main() {
+       #[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "arm")))]
+       {
+               let mut cfg = gcc::Build::new();
+               cfg.file("src/util/rust_crypto_nonstd_arch.c");
+               cfg.compile("lib_rust_crypto_nonstd_arch.a");
+       }
+}
index 39f805d44ee404280547a32a3bfc12225f8f3c4f..9b718badf3d6a2e422dd14c564c0b573c825ea01 100644 (file)
@@ -3,6 +3,10 @@ name = "lightning-fuzz"
 version = "0.0.1"
 authors = ["Automatically generated"]
 publish = false
+# Because the function is unused it gets dropped before we link lightning, so
+# we have to duplicate build.rs here. Note that this is only required for
+# fuzztarget mode.
+build = "../build.rs"
 
 [package.metadata]
 cargo-fuzz = true
@@ -13,12 +17,15 @@ honggfuzz_fuzz = ["honggfuzz"]
 
 [dependencies]
 lightning = { path = "..", features = ["fuzztarget"] }
-bitcoin = { git = "https://github.com/rust-bitcoin/rust-bitcoin", features = ["fuzztarget"] }
+bitcoin = { version = "0.13", features = ["fuzztarget"] }
 secp256k1 = { version = "0.9", features = ["fuzztarget"] }
 rust-crypto = "0.2"
 honggfuzz = { version = "0.5", optional = true }
 afl = { version = "0.3", optional = true }
 
+[build-dependencies]
+gcc = "0.3"
+
 # Prevent this from interfering with workspaces
 [workspace]
 members = ["."]
index b643996105e81cd33621405bf0a77a070214989c..20fd4b4352d176df8c8f1749298ce3948354d1a5 100644 (file)
@@ -8,7 +8,7 @@ use bitcoin::util::hash::Sha256dHash;
 use bitcoin::network::serialize::{serialize, BitcoinHash};
 
 use lightning::ln::channel::Channel;
-use lightning::ln::channelmanager::PendingForwardHTLCInfo;
+use lightning::ln::channelmanager::{HTLCFailReason, PendingForwardHTLCInfo};
 use lightning::ln::msgs;
 use lightning::ln::msgs::MsgDecodable;
 use lightning::chain::chaininterface::{FeeEstimator, ConfirmationTarget};
@@ -241,11 +241,11 @@ pub fn do_test(data: &[u8]) {
                        },
                        4 => {
                                let update_fail_htlc = decode_msg_with_len16!(msgs::UpdateFailHTLC, 32 + 8, 1);
-                               return_err!(channel.update_fail_htlc(&update_fail_htlc));
+                               return_err!(channel.update_fail_htlc(&update_fail_htlc, HTLCFailReason::dummy()));
                        },
                        5 => {
                                let update_fail_malformed_htlc = decode_msg!(msgs::UpdateFailMalformedHTLC, 32+8+32+2);
-                               return_err!(channel.update_fail_malformed_htlc(&update_fail_malformed_htlc));
+                               return_err!(channel.update_fail_malformed_htlc(&update_fail_malformed_htlc, HTLCFailReason::dummy()));
                        },
                        6 => {
                                let commitment_signed = decode_msg_with_len16!(msgs::CommitmentSigned, 32+64, 64);
diff --git a/fuzz/src/util/rust_crypto_nonstd_arch.c b/fuzz/src/util/rust_crypto_nonstd_arch.c
new file mode 120000 (symlink)
index 0000000..321d648
--- /dev/null
@@ -0,0 +1 @@
+../../../src/util/rust_crypto_nonstd_arch.c
\ No newline at end of file
index dfe17c5911e711e3601d90670879718b24ecbcd8..81fa29f554113a5cb156205ab5be351ceb3290fb 100644 (file)
@@ -157,7 +157,7 @@ pub struct HTLCOutputInCommitment {
 }
 
 #[inline]
-pub fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommitment, a_htlc_key: &PublicKey, b_htlc_key: &PublicKey, revocation_key: &PublicKey, offered: bool) -> Script {
+pub fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommitment, a_htlc_key: &PublicKey, b_htlc_key: &PublicKey, revocation_key: &PublicKey) -> Script {
        let payment_hash160 = {
                let mut ripemd = Ripemd160::new();
                ripemd.input(&htlc.payment_hash);
@@ -165,7 +165,7 @@ pub fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommitment, a
                ripemd.result(&mut res);
                res
        };
-       if offered {
+       if htlc.offered {
                Builder::new().push_opcode(opcodes::All::OP_DUP)
                              .push_opcode(opcodes::All::OP_HASH160)
                              .push_slice(&Hash160::from_data(&revocation_key.serialize())[..])
@@ -231,5 +231,5 @@ pub fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommitment, a
 /// commitment secret. 'htlc' does *not* need to have its previous_output_index filled.
 #[inline]
 pub fn get_htlc_redeemscript(htlc: &HTLCOutputInCommitment, keys: &TxCreationKeys) -> Script {
-       get_htlc_redeemscript_with_explicit_keys(htlc, &keys.a_htlc_key, &keys.b_htlc_key, &keys.revocation_key, htlc.offered)
+       get_htlc_redeemscript_with_explicit_keys(htlc, &keys.a_htlc_key, &keys.b_htlc_key, &keys.revocation_key)
 }
index b8a5bb66c5d39279f2e322d37ea7e4dfe4861703..067b2bc530dd1ae551c324f65318197421466ab1 100644 (file)
@@ -17,7 +17,7 @@ use crypto::hkdf::{hkdf_extract,hkdf_expand};
 use ln::msgs;
 use ln::msgs::{HandleError, MsgEncodable};
 use ln::channelmonitor::ChannelMonitor;
-use ln::channelmanager::PendingForwardHTLCInfo;
+use ln::channelmanager::{PendingForwardHTLCInfo, HTLCFailReason};
 use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment};
 use ln::chan_utils;
 use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
@@ -25,7 +25,7 @@ use util::{transaction_utils,rng};
 use util::sha2::Sha256;
 
 use std::default::Default;
-use std::cmp;
+use std::{cmp,mem};
 use std::time::Instant;
 
 pub struct ChannelKeys {
@@ -43,7 +43,7 @@ impl ChannelKeys {
        pub fn new_from_seed(seed: &[u8; 32]) -> Result<ChannelKeys, secp256k1::Error> {
                let mut prk = [0; 32];
                hkdf_extract(Sha256::new(), b"rust-lightning key gen salt", seed, &mut prk);
-               let secp_ctx = Secp256k1::new();
+               let secp_ctx = Secp256k1::without_caps();
 
                let mut okm = [0; 32];
                hkdf_expand(Sha256::new(), &prk, b"rust-lightning funding key info", &mut okm);
@@ -84,19 +84,73 @@ impl ChannelKeys {
 
 #[derive(PartialEq)]
 enum HTLCState {
+       /// Added by remote, to be included in next local commitment tx.
        RemoteAnnounced,
+       /// Included in a received commitment_signed message (implying we've revoke_and_ack'ed it), but
+       /// the remote side hasn't yet revoked their previous state, which we need them to do before we
+       /// accept this HTLC. Implies AwaitingRemoteRevoke.
+       /// We also have not yet included this HTLC in a commitment_signed message, and are waiting on
+       /// a remote revoke_and_ack on a previous state before we can do so.
+       AwaitingRemoteRevokeToAnnounce,
+       /// Included in a received commitment_signed message (implying we've revoke_and_ack'ed it), but
+       /// the remote side hasn't yet revoked their previous state, which we need them to do before we
+       /// accept this HTLC. Implies AwaitingRemoteRevoke.
+       /// We have included this HTLC in our latest commitment_signed and are now just waiting on a
+       /// revoke_and_ack.
+       AwaitingAnnouncedRemoteRevoke,
+       /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
+       /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
+       /// we will promote to Committed (note that they may not accept it until the next time we
+       /// revoke, but we dont really care about that:
+       ///  * they've revoked, so worst case we can announce an old state and get our (option on)
+       ///    money back (though we wont), and,
+       ///  * we'll send them a revoke when they send a commitment_signed, and since only they're
+       ///    allowed to remove it, the "can only be removed once committed on both sides" requirement
+       ///    doesn't matter to us and its up to them to enforce it, worst-case they jump ahead but
+       ///    we'll never get out of sync).
        LocalAnnounced,
        Committed,
+       /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
+       /// the change (though they'll need to revoke before we fail the payment).
+       RemoteRemoved,
+       /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
+       /// the remote side hasn't yet revoked their previous state, which we need them to do before we
+       /// can do any backwards failing. Implies AwaitingRemoteRevoke.
+       /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
+       /// remote revoke_and_ack on a previous state before we can do so.
+       AwaitingRemoteRevokeToRemove,
+       /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
+       /// the remote side hasn't yet revoked their previous state, which we need them to do before we
+       /// can do any backwards failing. Implies AwaitingRemoteRevoke.
+       /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
+       /// revoke_and_ack to drop completely.
+       AwaitingRemovedRemoteRevoke,
+       /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
+       /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
+       /// we'll promote to LocalRemovedAwaitingCommitment if we fulfilled, otherwise we'll drop at
+       /// that point.
+       /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
+       /// commitment transaction without it as otherwise we'll have to force-close the channel to
+       /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
+       /// anyway).
+       LocalRemoved,
+       /// Removed by us, sent a new commitment_signed and got a revoke_and_ack. Just waiting on an
+       /// updated local commitment transaction.
+       LocalRemovedAwaitingCommitment,
 }
 
-struct HTLCOutput {
+struct HTLCOutput { //TODO: Refactor into Outbound/InboundHTLCOutput (will save memory and fewer panics)
        outbound: bool, // ie to an HTLC-Timeout transaction
        htlc_id: u64,
        amount_msat: u64,
        cltv_expiry: u32,
        payment_hash: [u8; 32],
        state: HTLCState,
-       // state == RemoteAnnounced implies pending_forward_state, otherwise it must be None
+       /// If we're in a Remote* removed state, set if they failed, otherwise None
+       fail_reason: Option<HTLCFailReason>,
+       /// If we're in LocalRemoved*, set to true if we fulfilled the HTLC, and can claim money
+       local_removed_fulfilled: bool,
+       /// state pre-committed Remote* implies pending_forward_state, otherwise it must be None
        pending_forward_state: Option<PendingForwardHTLCInfo>,
 }
 
@@ -113,13 +167,23 @@ impl HTLCOutput {
 }
 
 /// See AwaitingRemoteRevoke ChannelState for more info
-struct HTLCOutputAwaitingACK {
-       // always outbound
-       amount_msat: u64,
-       cltv_expiry: u32,
-       payment_hash: [u8; 32],
-       onion_routing_packet: msgs::OnionPacket,
-       time_created: Instant, //TODO: Some kind of timeout thing-a-majig
+enum HTLCUpdateAwaitingACK {
+       AddHTLC {
+               // always outbound
+               amount_msat: u64,
+               cltv_expiry: u32,
+               payment_hash: [u8; 32],
+               onion_routing_packet: msgs::OnionPacket,
+               time_created: Instant, //TODO: Some kind of timeout thing-a-majig
+       },
+       ClaimHTLC {
+               payment_preimage: [u8; 32],
+               payment_hash: [u8; 32], // Only here for effecient duplicate detection
+       },
+       FailHTLC {
+               payment_hash: [u8; 32],
+               err_packet: msgs::OnionErrorPacket,
+       },
 }
 
 enum ChannelState {
@@ -184,7 +248,7 @@ pub struct Channel {
        cur_remote_commitment_transaction_number: u64,
        value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
        pending_htlcs: Vec<HTLCOutput>,
-       holding_cell_htlcs: Vec<HTLCOutputAwaitingACK>,
+       holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
        next_local_htlc_id: u64,
        next_remote_htlc_id: u64,
        channel_update_count: u32,
@@ -221,6 +285,8 @@ pub struct Channel {
        their_delayed_payment_basepoint: PublicKey,
        their_htlc_basepoint: PublicKey,
        their_cur_commitment_point: PublicKey,
+
+       their_prev_commitment_point: Option<PublicKey>,
        their_node_id: PublicKey,
 
        their_shutdown_scriptpubkey: Option<Script>,
@@ -321,7 +387,7 @@ impl Channel {
                        cur_remote_commitment_transaction_number: (1 << 48) - 1,
                        value_to_self_msat: channel_value_satoshis * 1000, //TODO: give them something on open? Parameterize it?
                        pending_htlcs: Vec::new(),
-                       holding_cell_htlcs: Vec::new(),
+                       holding_cell_htlc_updates: Vec::new(),
                        next_local_htlc_id: 0,
                        next_remote_htlc_id: 0,
                        channel_update_count: 0,
@@ -349,6 +415,8 @@ impl Channel {
                        their_delayed_payment_basepoint: PublicKey::new(),
                        their_htlc_basepoint: PublicKey::new(),
                        their_cur_commitment_point: PublicKey::new(),
+
+                       their_prev_commitment_point: None,
                        their_node_id: their_node_id,
 
                        their_shutdown_scriptpubkey: None,
@@ -441,7 +509,7 @@ impl Channel {
                        cur_remote_commitment_transaction_number: (1 << 48) - 1,
                        value_to_self_msat: msg.push_msat,
                        pending_htlcs: Vec::new(),
-                       holding_cell_htlcs: Vec::new(),
+                       holding_cell_htlc_updates: Vec::new(),
                        next_local_htlc_id: 0,
                        next_remote_htlc_id: 0,
                        channel_update_count: 0,
@@ -470,6 +538,8 @@ impl Channel {
                        their_delayed_payment_basepoint: msg.delayed_payment_basepoint,
                        their_htlc_basepoint: msg.htlc_basepoint,
                        their_cur_commitment_point: msg.first_per_commitment_point,
+
+                       their_prev_commitment_point: None,
                        their_node_id: their_node_id,
 
                        their_shutdown_scriptpubkey: None,
@@ -548,9 +618,23 @@ impl Channel {
                let dust_limit_satoshis = if local { self.our_dust_limit_satoshis } else { self.their_dust_limit_satoshis };
                let mut remote_htlc_total_msat = 0;
                let mut local_htlc_total_msat = 0;
+               let mut value_to_self_msat_offset = 0;
 
                for ref htlc in self.pending_htlcs.iter() {
-                       if htlc.state == HTLCState::Committed || htlc.state == (if generated_by_local { HTLCState::LocalAnnounced } else { HTLCState::RemoteAnnounced }) {
+                       let include = match htlc.state {
+                               HTLCState::RemoteAnnounced => !generated_by_local,
+                               HTLCState::AwaitingRemoteRevokeToAnnounce => !generated_by_local,
+                               HTLCState::AwaitingAnnouncedRemoteRevoke => true,
+                               HTLCState::LocalAnnounced => generated_by_local,
+                               HTLCState::Committed => true,
+                               HTLCState::RemoteRemoved => generated_by_local,
+                               HTLCState::AwaitingRemoteRevokeToRemove => generated_by_local,
+                               HTLCState::AwaitingRemovedRemoteRevoke => false,
+                               HTLCState::LocalRemoved => !generated_by_local,
+                               HTLCState::LocalRemovedAwaitingCommitment => false,
+                       };
+
+                       if include {
                                if htlc.outbound == local { // "offered HTLC output"
                                        if htlc.amount_msat / 1000 >= dust_limit_satoshis + (self.feerate_per_kw * HTLC_TIMEOUT_TX_WEIGHT / 1000) {
                                                let htlc_in_tx = htlc.get_in_commitment(true);
@@ -573,12 +657,29 @@ impl Channel {
                                } else {
                                        remote_htlc_total_msat += htlc.amount_msat;
                                }
+                       } else {
+                               match htlc.state {
+                                       HTLCState::AwaitingRemoteRevokeToRemove|HTLCState::AwaitingRemovedRemoteRevoke => {
+                                               if generated_by_local && htlc.fail_reason.is_none() {
+                                                       value_to_self_msat_offset -= htlc.amount_msat as i64;
+                                               }
+                                       },
+                                       HTLCState::LocalRemoved => {
+                                               if !generated_by_local && htlc.local_removed_fulfilled {
+                                                       value_to_self_msat_offset += htlc.amount_msat as i64;
+                                               }
+                                       },
+                                       HTLCState::LocalRemovedAwaitingCommitment => {
+                                               value_to_self_msat_offset += htlc.amount_msat as i64;
+                                       },
+                                       _ => {},
+                               }
                        }
                }
 
                let total_fee: u64 = self.feerate_per_kw * (COMMITMENT_TX_BASE_WEIGHT + (txouts.len() as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
-               let value_to_self: i64 = ((self.value_to_self_msat - local_htlc_total_msat) as i64) / 1000 - if self.channel_outbound { total_fee as i64 } else { 0 };
-               let value_to_remote: i64 = (((self.channel_value_satoshis * 1000 - self.value_to_self_msat - remote_htlc_total_msat) / 1000) as i64) - if self.channel_outbound { 0 } else { total_fee as i64 };
+               let value_to_self: i64 = ((self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset) / 1000 - if self.channel_outbound { total_fee as i64 } else { 0 };
+               let value_to_remote: i64 = (((self.channel_value_satoshis * 1000 - self.value_to_self_msat - remote_htlc_total_msat) as i64 - value_to_self_msat_offset) / 1000) - if self.channel_outbound { 0 } else { total_fee as i64 };
 
                let value_to_a = if local { value_to_self } else { value_to_remote };
                let value_to_b = if local { value_to_remote } else { value_to_self };
@@ -607,12 +708,9 @@ impl Channel {
                let mut htlcs_used: Vec<HTLCOutputInCommitment> = Vec::new();
                for (idx, out) in txouts.drain(..).enumerate() {
                        outputs.push(out.0);
-                       match out.1 {
-                               Some(out_htlc) => {
-                                       htlcs_used.push(out_htlc);
-                                       htlcs_used.last_mut().unwrap().transaction_output_index = idx as u32;
-                               },
-                               None => {}
+                       if let Some(out_htlc) = out.1 {
+                               htlcs_used.push(out_htlc);
+                               htlcs_used.last_mut().unwrap().transaction_output_index = idx as u32;
                        }
                }
 
@@ -770,7 +868,7 @@ impl Channel {
        /// Builds the htlc-success or htlc-timeout transaction which spends a given HTLC output
        /// @local is used only to convert relevant internal structures which refer to remote vs local
        /// to decide value of outputs and direction of HTLCs.
-       fn build_htlc_transaction(&self, prev_hash: &Sha256dHash, htlc: &HTLCOutputInCommitment, local: bool, keys: &TxCreationKeys) -> Result<Transaction, HandleError> {
+       fn build_htlc_transaction(&self, prev_hash: &Sha256dHash, htlc: &HTLCOutputInCommitment, local: bool, keys: &TxCreationKeys) -> Transaction {
                let mut txins: Vec<TxIn> = Vec::new();
                txins.push(TxIn {
                        prev_hash: prev_hash.clone(),
@@ -794,12 +892,12 @@ impl Channel {
                        value: htlc.amount_msat / 1000 - total_fee //TODO: BOLT 3 does not specify if we should add amount_msat before dividing or if we should divide by 1000 before subtracting (as we do here)
                });
 
-               Ok(Transaction {
+               Transaction {
                        version: 2,
                        lock_time: if htlc.offered { htlc.cltv_expiry } else { 0 },
                        input: txins,
                        output: txouts,
-               })
+               }
        }
 
        /// Signs a transaction created by build_htlc_transaction. If the transaction is an
@@ -843,7 +941,7 @@ impl Channel {
                Ok(())
        }
 
-       pub fn get_update_fulfill_htlc(&mut self, payment_preimage: [u8; 32]) -> Result<msgs::UpdateFulfillHTLC, HandleError> {
+       pub fn get_update_fulfill_htlc(&mut self, payment_preimage_arg: [u8; 32]) -> Result<Option<msgs::UpdateFulfillHTLC>, HandleError> {
                // Either ChannelFunded got set (which means it wont bet unset) or there is no way any
                // caller thought we could have something claimed (cause we wouldn't have accepted in an
                // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
@@ -854,68 +952,141 @@ impl Channel {
                assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
                let mut sha = Sha256::new();
-               sha.input(&payment_preimage);
-               let mut payment_hash = [0; 32];
-               sha.result(&mut payment_hash);
+               sha.input(&payment_preimage_arg);
+               let mut payment_hash_calc = [0; 32];
+               sha.result(&mut payment_hash_calc);
+
+               // Now update local state:
+               if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
+                       for pending_update in self.holding_cell_htlc_updates.iter() {
+                               match pending_update {
+                                       &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, .. } => {
+                                               if payment_preimage_arg == *payment_preimage {
+                                                       return Ok(None);
+                                               }
+                                       },
+                                       &HTLCUpdateAwaitingACK::FailHTLC { ref payment_hash, .. } => {
+                                               if payment_hash_calc == *payment_hash {
+                                                       return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
+                                               }
+                                       },
+                                       _ => {}
+                               }
+                       }
+                       self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
+                               payment_preimage: payment_preimage_arg, payment_hash: payment_hash_calc,
+                       });
+                       return Ok(None);
+               }
 
                let mut htlc_id = 0;
                let mut htlc_amount_msat = 0;
-               //TODO: swap_remove since we dont need to maintain ordering here
-               self.pending_htlcs.retain(|ref htlc| {
-                       if !htlc.outbound && htlc.payment_hash == payment_hash {
+               for htlc in self.pending_htlcs.iter_mut() {
+                       if !htlc.outbound && htlc.payment_hash == payment_hash_calc {
                                if htlc_id != 0 {
                                        panic!("Duplicate HTLC payment_hash, you probably re-used payment preimages, NEVER DO THIS!");
                                }
                                htlc_id = htlc.htlc_id;
                                htlc_amount_msat += htlc.amount_msat;
-                               false
-                       } else { true }
-               });
+                               if htlc.state == HTLCState::Committed {
+                                       htlc.state = HTLCState::LocalRemoved;
+                                       htlc.local_removed_fulfilled = true;
+                               } else if htlc.state == HTLCState::RemoteAnnounced {
+                                       panic!("Somehow forwarded HTLC prior to remote revocation!");
+                               } else if htlc.state == HTLCState::LocalRemoved || htlc.state == HTLCState::LocalRemovedAwaitingCommitment {
+                                       return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
+                               } else {
+                                       panic!("Have an inbound HTLC when not awaiting remote revoke that had a garbage state");
+                               }
+                       }
+               }
                if htlc_amount_msat == 0 {
                        return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
                }
+               self.channel_monitor.provide_payment_preimage(&payment_preimage_arg);
 
-               //TODO: This is racy af, they may have pending messages in flight to us that will not have
-               //received this yet!
-               self.value_to_self_msat += htlc_amount_msat;
-               Ok(msgs::UpdateFulfillHTLC {
+               Ok(Some(msgs::UpdateFulfillHTLC {
                        channel_id: self.channel_id(),
                        htlc_id: htlc_id,
-                       payment_preimage: payment_preimage,
-               })
+                       payment_preimage: payment_preimage_arg,
+               }))
+       }
+
+       pub fn get_update_fulfill_htlc_and_commit(&mut self, payment_preimage: [u8; 32]) -> Result<Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, HandleError> {
+               match self.get_update_fulfill_htlc(payment_preimage)? {
+                       Some(update_fulfill_htlc) =>
+                               Ok(Some((update_fulfill_htlc, self.send_commitment_no_status_check()?))),
+                       None => Ok(None)
+               }
        }
 
-       pub fn get_update_fail_htlc(&mut self, payment_hash: &[u8; 32], err_packet: msgs::OnionErrorPacket) -> Result<msgs::UpdateFailHTLC, HandleError> {
+       pub fn get_update_fail_htlc(&mut self, payment_hash_arg: &[u8; 32], err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, HandleError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(HandleError{err: "Was asked to fail an HTLC when channel was not in an operational state", msg: None});
                }
                assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
+               // Now update local state:
+               if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
+                       for pending_update in self.holding_cell_htlc_updates.iter() {
+                               match pending_update {
+                                       &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_hash, .. } => {
+                                               if *payment_hash_arg == *payment_hash {
+                                                       return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
+                                               }
+                                       },
+                                       &HTLCUpdateAwaitingACK::FailHTLC { ref payment_hash, .. } => {
+                                               if *payment_hash_arg == *payment_hash {
+                                                       return Ok(None);
+                                               }
+                                       },
+                                       _ => {}
+                               }
+                       }
+                       self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
+                               payment_hash: payment_hash_arg.clone(),
+                               err_packet,
+                       });
+                       return Ok(None);
+               }
+
                let mut htlc_id = 0;
                let mut htlc_amount_msat = 0;
-               //TODO: swap_remove since we dont need to maintain ordering here
-               self.pending_htlcs.retain(|ref htlc| {
-                       if !htlc.outbound && htlc.payment_hash == *payment_hash {
+               for htlc in self.pending_htlcs.iter_mut() {
+                       if !htlc.outbound && htlc.payment_hash == *payment_hash_arg {
                                if htlc_id != 0 {
                                        panic!("Duplicate HTLC payment_hash, you probably re-used payment preimages, NEVER DO THIS!");
                                }
                                htlc_id = htlc.htlc_id;
                                htlc_amount_msat += htlc.amount_msat;
-                               false
-                       } else { true }
-               });
+                               if htlc.state == HTLCState::Committed {
+                                       htlc.state = HTLCState::LocalRemoved;
+                               } else if htlc.state == HTLCState::RemoteAnnounced {
+                                       panic!("Somehow forwarded HTLC prior to remote revocation!");
+                               } else if htlc.state == HTLCState::LocalRemoved || htlc.state == HTLCState::LocalRemovedAwaitingCommitment {
+                                       return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
+                               } else {
+                                       panic!("Have an inbound HTLC when not awaiting remote revoke that had a garbage state");
+                               }
+                       }
+               }
                if htlc_amount_msat == 0 {
                        return Err(HandleError{err: "Unable to find a pending HTLC which matched the given payment preimage", msg: None});
                }
 
-               //TODO: This is racy af, they may have pending messages in flight to us that will not have
-               //received this yet!
-
-               Ok(msgs::UpdateFailHTLC {
+               Ok(Some(msgs::UpdateFailHTLC {
                        channel_id: self.channel_id(),
                        htlc_id,
                        reason: err_packet
-               })
+               }))
+       }
+
+       pub fn get_update_fail_htlc_and_commit(&mut self, payment_hash: &[u8; 32], err_packet: msgs::OnionErrorPacket) -> Result<Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)>, HandleError> {
+               match self.get_update_fail_htlc(payment_hash, err_packet)? {
+                       Some(update_fail_htlc) =>
+                               Ok(Some((update_fail_htlc, self.send_commitment_no_status_check()?))),
+                       None => Ok(None)
+               }
        }
 
        // Message handlers:
@@ -1018,6 +1189,8 @@ impl Channel {
                self.channel_state = ChannelState::FundingSent as u32;
                let funding_txo = self.channel_monitor.get_funding_txo().unwrap();
                self.channel_id = funding_txo.0.into_be() ^ Uint256::from_u64(funding_txo.1 as u64).unwrap(); //TODO: or le?
+               self.cur_remote_commitment_transaction_number -= 1;
+               self.cur_local_commitment_transaction_number -= 1;
 
                Ok(msgs::FundingSigned {
                        channel_id: self.channel_id,
@@ -1034,7 +1207,7 @@ impl Channel {
                if self.channel_state != ChannelState::FundingCreated as u32 {
                        return Err(HandleError{err: "Received funding_signed in strange state!", msg: None});
                }
-               if self.channel_monitor.get_min_seen_secret() != (1 << 48) || self.cur_remote_commitment_transaction_number != (1 << 48) - 1 || self.cur_local_commitment_transaction_number != (1 << 48) - 1 {
+               if self.channel_monitor.get_min_seen_secret() != (1 << 48) || self.cur_remote_commitment_transaction_number != (1 << 48) - 2 || self.cur_local_commitment_transaction_number != (1 << 48) - 1 {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
@@ -1048,6 +1221,7 @@ impl Channel {
                secp_call!(self.secp_ctx.verify(&local_sighash, &msg.signature, &self.their_funding_pubkey), "Invalid funding_signed signature from peer");
 
                self.channel_state = ChannelState::FundingSent as u32;
+               self.cur_local_commitment_transaction_number -= 1;
 
                Ok(())
        }
@@ -1062,24 +1236,35 @@ impl Channel {
                        return Err(HandleError{err: "Peer sent a funding_locked at a strange time", msg: None});
                }
 
-               //TODO: Note that this must be a duplicate of the previous commitment point they sent us,
-               //as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
-               //they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
-               //a protocol oversight, but I assume I'm just missing something.
-               if self.their_cur_commitment_point != msg.next_per_commitment_point {
-                       return Err(HandleError{err: "Non-duplicate next_per_commitment_point in funding_locked", msg: None});
-               }
+               self.their_prev_commitment_point = Some(self.their_cur_commitment_point);
                self.their_cur_commitment_point = msg.next_per_commitment_point;
                Ok(())
        }
 
        /// Returns (inbound_htlc_count, outbound_htlc_count, htlc_outbound_value_msat, htlc_inbound_value_msat)
-       fn get_pending_htlc_stats(&self) -> (u32, u32, u64, u64) {
+       /// If its for a remote update check, we need to be more lax about checking against messages we
+       /// sent but they may not have received/processed before they sent this message. Further, for
+       /// our own sends, we're more conservative and even consider things they've removed against
+       /// totals, though there is little reason to outside of further avoiding any race condition
+       /// issues.
+       fn get_pending_htlc_stats(&self, for_remote_update_check: bool) -> (u32, u32, u64, u64) {
                let mut inbound_htlc_count: u32 = 0;
                let mut outbound_htlc_count: u32 = 0;
                let mut htlc_outbound_value_msat = 0;
                let mut htlc_inbound_value_msat = 0;
                for ref htlc in self.pending_htlcs.iter() {
+                       match htlc.state {
+                               HTLCState::RemoteAnnounced => {},
+                               HTLCState::AwaitingRemoteRevokeToAnnounce => {},
+                               HTLCState::AwaitingAnnouncedRemoteRevoke => {},
+                               HTLCState::LocalAnnounced => { if for_remote_update_check { continue; } },
+                               HTLCState::Committed => {},
+                               HTLCState::RemoteRemoved =>  { if for_remote_update_check { continue; } },
+                               HTLCState::AwaitingRemoteRevokeToRemove =>  { if for_remote_update_check { continue; } },
+                               HTLCState::AwaitingRemovedRemoteRevoke =>  { if for_remote_update_check { continue; } },
+                               HTLCState::LocalRemoved =>  {},
+                               HTLCState::LocalRemovedAwaitingCommitment =>  { if for_remote_update_check { continue; } },
+                       }
                        if !htlc.outbound {
                                inbound_htlc_count += 1;
                                htlc_inbound_value_msat += htlc.amount_msat;
@@ -1102,7 +1287,7 @@ impl Channel {
                        return Err(HandleError{err: "Remote side tried to send less than our minimum HTLC value", msg: None});
                }
 
-               let (inbound_htlc_count, _, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats();
+               let (inbound_htlc_count, _, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats(true);
                if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 {
                        return Err(HandleError{err: "Remote tried to push more than our max accepted HTLCs", msg: None});
                }
@@ -1135,6 +1320,8 @@ impl Channel {
                        payment_hash: msg.payment_hash,
                        cltv_expiry: msg.cltv_expiry,
                        state: HTLCState::RemoteAnnounced,
+                       fail_reason: None,
+                       local_removed_fulfilled: false,
                        pending_forward_state: Some(pending_forward_state),
                });
 
@@ -1142,9 +1329,8 @@ impl Channel {
        }
 
        /// Removes an outbound HTLC which has been commitment_signed by the remote end
-       fn remove_outbound_htlc(&mut self, htlc_id: u64, check_preimage: Option<[u8; 32]>) -> Result<HTLCOutput, HandleError> {
-               let mut found_idx = None;
-               for (idx, ref htlc) in self.pending_htlcs.iter().enumerate() {
+       fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<[u8; 32]>, fail_reason: Option<HTLCFailReason>) -> Result<(), HandleError> {
+               for mut htlc in self.pending_htlcs.iter_mut() {
                        if htlc.outbound && htlc.htlc_id == htlc_id {
                                match check_preimage {
                                        None => {},
@@ -1153,53 +1339,20 @@ impl Channel {
                                                        return Err(HandleError{err: "Remote tried to fulfill HTLC with an incorrect preimage", msg: None});
                                                }
                                };
-                               found_idx = Some(idx);
-                               break;
-                       }
-               }
-               match found_idx {
-                       None => Err(HandleError{err: "Remote tried to fulfill/fail an HTLC we couldn't find", msg: None}),
-                       Some(idx) => {
-                               Ok(self.pending_htlcs.swap_remove(idx))
-                       }
-               }
-       }
-
-       /// Used to fulfill holding_cell_htlcs when we get a remote ack (or implicitly get it by them
-       /// fulfilling or failing the last pending HTLC)
-       fn free_holding_cell_htlcs(&mut self) -> Result<Option<(Vec<msgs::UpdateAddHTLC>, msgs::CommitmentSigned)>, HandleError> {
-               if self.holding_cell_htlcs.len() != 0 {
-                       let mut new_htlcs = self.holding_cell_htlcs.split_off(0);
-                       let mut update_add_msgs = Vec::with_capacity(new_htlcs.len());
-                       let mut err = None;
-                       for new_htlc in new_htlcs.drain(..) {
-                               // Note that this *can* fail, though it should be due to rather-rare conditions on
-                               // fee races with adding too many outputs which push our total payments just over
-                               // the limit. In case its less rare than I anticipate, we may want to revisit
-                               // handling this case better and maybe fufilling some of the HTLCs while attempting
-                               // to rebalance channels.
-                               if self.holding_cell_htlcs.len() != 0 {
-                                       self.holding_cell_htlcs.push(new_htlc);
+                               if htlc.state == HTLCState::LocalAnnounced {
+                                       return Err(HandleError{err: "Remote tried to fulfill HTLC before it had been committed", msg: None});
+                               } else if htlc.state == HTLCState::Committed {
+                                       htlc.state = HTLCState::RemoteRemoved;
+                                       htlc.fail_reason = fail_reason;
+                               } else if htlc.state == HTLCState::AwaitingRemoteRevokeToRemove || htlc.state == HTLCState::AwaitingRemovedRemoteRevoke || htlc.state == HTLCState::RemoteRemoved {
+                                       return Err(HandleError{err: "Remote tried to fulfill HTLC that they'd already fulfilled", msg: None});
                                } else {
-                                       match self.send_htlc(new_htlc.amount_msat, new_htlc.payment_hash, new_htlc.cltv_expiry, new_htlc.onion_routing_packet.clone()) {
-                                               Ok(update_add_msg_option) => update_add_msgs.push(update_add_msg_option.unwrap()),
-                                               Err(e) => {
-                                                       self.holding_cell_htlcs.push(new_htlc);
-                                                       err = Some(e);
-                                               }
-                                       }
+                                       panic!("Got a non-outbound state on an outbound HTLC");
                                }
+                               return Ok(());
                        }
-                       //TODO: Need to examine the type of err - if its a fee issue or similar we may want to
-                       //fail it back the route, if its a temporary issue we can ignore it...
-                       if update_add_msgs.len() > 0 {
-                               Ok(Some((update_add_msgs, self.send_commitment()?)))
-                       } else {
-                               Err(err.unwrap())
-                       }
-               } else {
-                       Ok(None)
                }
+               Err(HandleError{err: "Remote tried to fulfill/fail an HTLC we couldn't find", msg: None})
        }
 
        pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
@@ -1212,50 +1365,27 @@ impl Channel {
                let mut payment_hash = [0; 32];
                sha.result(&mut payment_hash);
 
-               match self.remove_outbound_htlc(msg.htlc_id, Some(payment_hash)) {
-                       Err(e) => return Err(e),
-                       Ok(htlc) => {
-                               //TODO: Double-check that we didn't exceed some limits (or value_to_self went
-                               //negative here?)
-                               self.value_to_self_msat -= htlc.amount_msat;
-                       }
-               }
-               Ok(())
+               self.channel_monitor.provide_payment_preimage(&msg.payment_preimage);
+               self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None)
        }
 
-       pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC) -> Result<[u8; 32], HandleError> {
+       pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), HandleError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(HandleError{err: "Got add HTLC message when channel was not in an operational state", msg: None});
                }
 
-               let payment_hash = match self.remove_outbound_htlc(msg.htlc_id, None) {
-                       Err(e) => return Err(e),
-                       Ok(htlc) => {
-                               //TODO: Double-check that we didn't exceed some limits (or value_to_self went
-                               //negative here?)
-                               htlc.payment_hash
-                       }
-               };
-               Ok(payment_hash)
+               self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))
        }
 
-       pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC) -> Result<[u8; 32], HandleError> {
+       pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), HandleError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(HandleError{err: "Got add HTLC message when channel was not in an operational state", msg: None});
                }
 
-               let payment_hash = match self.remove_outbound_htlc(msg.htlc_id, None) {
-                       Err(e) => return Err(e),
-                       Ok(htlc) => {
-                               //TODO: Double-check that we didn't exceed some limits (or value_to_self went
-                               //negative here?)
-                               htlc.payment_hash
-                       }
-               };
-               Ok(payment_hash)
+               self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))
        }
 
-       pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Vec<PendingForwardHTLCInfo>), HandleError> {
+       pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), HandleError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(HandleError{err: "Got commitment signed message when channel was not in an operational state", msg: None});
                }
@@ -1273,34 +1403,120 @@ impl Channel {
                }
 
                for (idx, ref htlc) in local_commitment_tx.1.iter().enumerate() {
-                       let htlc_tx = self.build_htlc_transaction(&local_commitment_txid, htlc, true, &local_keys)?;
+                       let htlc_tx = self.build_htlc_transaction(&local_commitment_txid, htlc, true, &local_keys);
                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &local_keys);
                        let htlc_sighash = Message::from_slice(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]).unwrap();
                        secp_call!(self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &local_keys.b_htlc_key), "Invalid HTLC tx siganture from peer");
                }
 
                let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number - 1)).unwrap();
-               let per_commitment_secret = chan_utils::build_commitment_secret(self.local_keys.commitment_seed, self.cur_local_commitment_transaction_number);
-
-               //TODO: Store htlc keys in our channel_watcher
+               let per_commitment_secret = chan_utils::build_commitment_secret(self.local_keys.commitment_seed, self.cur_local_commitment_transaction_number + 1);
 
                // Update state now that we've passed all the can-fail calls...
 
-               let mut to_forward_infos = Vec::new();
-               for ref mut htlc in self.pending_htlcs.iter_mut() {
+               let mut need_our_commitment = false;
+               for htlc in self.pending_htlcs.iter_mut() {
                        if htlc.state == HTLCState::RemoteAnnounced {
-                               htlc.state = HTLCState::Committed;
-                               to_forward_infos.push(htlc.pending_forward_state.take().unwrap());
+                               htlc.state = HTLCState::AwaitingRemoteRevokeToAnnounce;
+                               need_our_commitment = true;
+                       } else if htlc.state == HTLCState::RemoteRemoved {
+                               htlc.state = HTLCState::AwaitingRemoteRevokeToRemove;
+                               need_our_commitment = true;
                        }
                }
+               // Finally delete all the LocalRemovedAwaitingCommitment HTLCs
+               // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
+               let mut claimed_value_msat = 0;
+               self.pending_htlcs.retain(|htlc| {
+                       if htlc.state == HTLCState::LocalRemovedAwaitingCommitment {
+                               claimed_value_msat += htlc.amount_msat;
+                               false
+                       } else { true }
+               });
+               self.value_to_self_msat += claimed_value_msat;
 
                self.cur_local_commitment_transaction_number -= 1;
 
+               let our_commitment_signed = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+                       // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
+                       // we'll send one right away when we get the revoke_and_ack when we
+                       // free_holding_cell_htlcs().
+                       Some(self.send_commitment_no_status_check()?)
+               } else { None };
+
                Ok((msgs::RevokeAndACK {
                        channel_id: self.channel_id,
                        per_commitment_secret: per_commitment_secret,
                        next_per_commitment_point: next_per_commitment_point,
-               }, to_forward_infos))
+               }, our_commitment_signed))
+       }
+
+       /// Used to fulfill holding_cell_htlcs when we get a remote ack (or implicitly get it by them
+       /// fulfilling or failing the last pending HTLC)
+       fn free_holding_cell_htlcs(&mut self) -> Result<Option<msgs::CommitmentUpdate>, HandleError> {
+               if self.holding_cell_htlc_updates.len() != 0 {
+                       let mut htlc_updates = Vec::new();
+                       mem::swap(&mut htlc_updates, &mut self.holding_cell_htlc_updates);
+                       let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
+                       let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
+                       let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
+                       let mut err = None;
+                       for htlc_update in htlc_updates.drain(..) {
+                               // Note that this *can* fail, though it should be due to rather-rare conditions on
+                               // fee races with adding too many outputs which push our total payments just over
+                               // the limit. In case its less rare than I anticipate, we may want to revisit
+                               // handling this case better and maybe fulfilling some of the HTLCs while attempting
+                               // to rebalance channels.
+                               if err.is_some() { // We're back to AwaitingRemoteRevoke (or are about to fail the channel)
+                                       self.holding_cell_htlc_updates.push(htlc_update);
+                               } else {
+                                       match &htlc_update {
+                                               &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, payment_hash, ref onion_routing_packet, ..} => {
+                                                       match self.send_htlc(amount_msat, payment_hash, cltv_expiry, onion_routing_packet.clone()) {
+                                                               Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
+                                                               Err(e) => {
+                                                                       err = Some(e);
+                                                               }
+                                                       }
+                                               },
+                                               &HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage, .. } => {
+                                                       match self.get_update_fulfill_htlc(payment_preimage) {
+                                                               Ok(update_fulfill_msg_option) => update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap()),
+                                                               Err(e) => {
+                                                                       err = Some(e);
+                                                               }
+                                                       }
+                                               },
+                                               &HTLCUpdateAwaitingACK::FailHTLC { payment_hash, ref err_packet } => {
+                                                       match self.get_update_fail_htlc(&payment_hash, err_packet.clone()) {
+                                                               Ok(update_fail_msg_option) => update_fail_htlcs.push(update_fail_msg_option.unwrap()),
+                                                               Err(e) => {
+                                                                       err = Some(e);
+                                                               }
+                                                       }
+                                               },
+                                       }
+                                       if err.is_some() {
+                                               self.holding_cell_htlc_updates.push(htlc_update);
+                                       }
+                               }
+                       }
+                       //TODO: Need to examine the type of err - if its a fee issue or similar we may want to
+                       //fail it back the route, if its a temporary issue we can ignore it...
+                       match err {
+                               None => {
+                                       Ok(Some(msgs::CommitmentUpdate {
+                                               update_add_htlcs,
+                                               update_fulfill_htlcs,
+                                               update_fail_htlcs,
+                                               commitment_signed: self.send_commitment_no_status_check()?
+                                       }))
+                               },
+                               Some(e) => Err(e)
+                       }
+               } else {
+                       Ok(None)
+               }
        }
 
        /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
@@ -1308,35 +1524,86 @@ impl Channel {
        /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
        /// generating an appropriate error *after* the channel state has been updated based on the
        /// revoke_and_ack message.
-       pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK) -> Result<Option<(Vec<msgs::UpdateAddHTLC>, msgs::CommitmentSigned)>, HandleError> {
+       pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK) -> Result<(Option<msgs::CommitmentUpdate>, Vec<PendingForwardHTLCInfo>, Vec<([u8; 32], HTLCFailReason)>), HandleError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(HandleError{err: "Got revoke/ACK message when channel was not in an operational state", msg: None});
                }
-               if PublicKey::from_secret_key(&self.secp_ctx, &secp_call!(SecretKey::from_slice(&self.secp_ctx, &msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret")).unwrap() != self.their_cur_commitment_point {
-                       return Err(HandleError{err: "Got a revoke commitment secret which didn't correspond to their current pubkey", msg: None});
+               if let Some(their_prev_commitment_point) = self.their_prev_commitment_point {
+                       if PublicKey::from_secret_key(&self.secp_ctx, &secp_call!(SecretKey::from_slice(&self.secp_ctx, &msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret")).unwrap() != their_prev_commitment_point {
+                               return Err(HandleError{err: "Got a revoke commitment secret which didn't correspond to their current pubkey", msg: None});
+                       }
                }
-               self.channel_monitor.provide_secret(self.cur_remote_commitment_transaction_number, msg.per_commitment_secret)?;
+               self.channel_monitor.provide_secret(self.cur_remote_commitment_transaction_number + 1, msg.per_commitment_secret)?;
 
                // Update state now that we've passed all the can-fail calls...
                // (note that we may still fail to generate the new commitment_signed message, but that's
                // OK, we step the channel here and *then* if the new generation fails we can fail the
                // channel based on that, but stepping stuff here should be safe either way.
                self.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+               self.their_prev_commitment_point = Some(self.their_cur_commitment_point);
                self.their_cur_commitment_point = msg.next_per_commitment_point;
                self.cur_remote_commitment_transaction_number -= 1;
+
+               let mut to_forward_infos = Vec::new();
+               let mut revoked_htlcs = Vec::new();
+               let mut require_commitment = false;
+               let mut value_to_self_msat_diff: i64 = 0;
+               // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
+               self.pending_htlcs.retain(|htlc| {
+                       if htlc.state == HTLCState::LocalRemoved {
+                               if htlc.local_removed_fulfilled { true } else { false }
+                       } else if htlc.state == HTLCState::AwaitingRemovedRemoteRevoke {
+                               if let Some(reason) = htlc.fail_reason.clone() { // We really want take() here, but, again, non-mut ref :(
+                                       revoked_htlcs.push((htlc.payment_hash, reason));
+                               } else {
+                                       // They fulfilled, so we sent them money
+                                       value_to_self_msat_diff -= htlc.amount_msat as i64;
+                               }
+                               false
+                       } else { true }
+               });
                for htlc in self.pending_htlcs.iter_mut() {
                        if htlc.state == HTLCState::LocalAnnounced {
                                htlc.state = HTLCState::Committed;
+                       } else if htlc.state == HTLCState::AwaitingRemoteRevokeToAnnounce {
+                               htlc.state = HTLCState::AwaitingAnnouncedRemoteRevoke;
+                               require_commitment = true;
+                       } else if htlc.state == HTLCState::AwaitingAnnouncedRemoteRevoke {
+                               htlc.state = HTLCState::Committed;
+                               to_forward_infos.push(htlc.pending_forward_state.take().unwrap());
+                       } else if htlc.state == HTLCState::AwaitingRemoteRevokeToRemove {
+                               htlc.state = HTLCState::AwaitingRemovedRemoteRevoke;
+                               require_commitment = true;
+                       } else if htlc.state == HTLCState::LocalRemoved {
+                               assert!(htlc.local_removed_fulfilled);
+                               htlc.state = HTLCState::LocalRemovedAwaitingCommitment;
                        }
                }
+               self.value_to_self_msat = (self.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
 
-               self.free_holding_cell_htlcs()
+               match self.free_holding_cell_htlcs()? {
+                       Some(commitment_update) => {
+                               Ok((Some(commitment_update), to_forward_infos, revoked_htlcs))
+                       },
+                       None => {
+                               if require_commitment {
+                                       Ok((Some(msgs::CommitmentUpdate {
+                                               update_add_htlcs: Vec::new(),
+                                               update_fulfill_htlcs: Vec::new(),
+                                               update_fail_htlcs: Vec::new(),
+                                               commitment_signed: self.send_commitment_no_status_check()?
+                                       }), to_forward_infos, revoked_htlcs))
+                               } else {
+                                       Ok((None, to_forward_infos, revoked_htlcs))
+                               }
+                       }
+               }
        }
 
        pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
-        if self.channel_outbound {
+               if self.channel_outbound {
                        return Err(HandleError{err: "Non-funding remote tried to update channel fee", msg: None});
-        }
+               }
                Channel::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
                self.feerate_per_kw = msg.feerate_per_kw as u64;
                Ok(())
@@ -1396,10 +1663,16 @@ impl Channel {
                // We can't send our shutdown until we've committed all of our pending HTLCs, but the
                // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
                // cell HTLCs and return them to fail the payment.
-               let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlcs.len());
-               for htlc in self.holding_cell_htlcs.drain(..) {
-                       dropped_outbound_htlcs.push(htlc.payment_hash);
-               }
+               let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+               self.holding_cell_htlc_updates.retain(|htlc_update| {
+                       match htlc_update {
+                               &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, .. } => {
+                                       dropped_outbound_htlcs.push(payment_hash.clone());
+                                       false
+                               },
+                               _ => true
+                       }
+               });
                for htlc in self.pending_htlcs.iter() {
                        if htlc.state == HTLCState::LocalAnnounced {
                                return Ok((None, None, dropped_outbound_htlcs));
@@ -1789,6 +2062,7 @@ impl Channel {
                self.channel_state = ChannelState::FundingCreated as u32;
                let funding_txo = self.channel_monitor.get_funding_txo().unwrap();
                self.channel_id = funding_txo.0.into_be() ^ Uint256::from_u64(funding_txo.1 as u64).unwrap(); //TODO: or le?
+               self.cur_remote_commitment_transaction_number -= 1;
 
                Ok(msgs::FundingCreated {
                        temporary_channel_id: temporary_channel_id,
@@ -1852,7 +2126,7 @@ impl Channel {
                        return Err(HandleError{err: "Cannot send less than their minimum HTLC value", msg: None});
                }
 
-               let (_, outbound_htlc_count, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats();
+               let (_, outbound_htlc_count, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats(false);
                if outbound_htlc_count + 1 > self.their_max_accepted_htlcs as u32 {
                        return Err(HandleError{err: "Cannot push more than their max accepted HTLCs", msg: None});
                }
@@ -1871,7 +2145,7 @@ impl Channel {
                // Now update local state:
                if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
                        //TODO: Check the limits *including* other pending holding cell HTLCs!
-                       self.holding_cell_htlcs.push(HTLCOutputAwaitingACK {
+                       self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
                                amount_msat: amount_msat,
                                payment_hash: payment_hash,
                                cltv_expiry: cltv_expiry,
@@ -1888,6 +2162,8 @@ impl Channel {
                        payment_hash: payment_hash.clone(),
                        cltv_expiry: cltv_expiry,
                        state: HTLCState::LocalAnnounced,
+                       fail_reason: None,
+                       local_removed_fulfilled: false,
                        pending_forward_state: None
                });
 
@@ -1922,9 +2198,23 @@ impl Channel {
                if !have_updates {
                        return Err(HandleError{err: "Cannot create commitment tx until we have some updates to send", msg: None});
                }
-
+               self.send_commitment_no_status_check()
+       }
+       /// Only fails in case of bad keys
+       fn send_commitment_no_status_check(&mut self) -> Result<msgs::CommitmentSigned, HandleError> {
                let funding_script = self.get_funding_redeemscript();
 
+               // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
+               // fail to generate this, we still are at least at a position where upgrading their status
+               // is acceptable.
+               for htlc in self.pending_htlcs.iter_mut() {
+                       if htlc.state == HTLCState::AwaitingRemoteRevokeToAnnounce {
+                               htlc.state = HTLCState::AwaitingAnnouncedRemoteRevoke;
+                       } else if htlc.state == HTLCState::AwaitingRemoteRevokeToRemove {
+                               htlc.state = HTLCState::AwaitingRemovedRemoteRevoke;
+                       }
+               }
+
                let remote_keys = self.build_remote_transaction_keys()?;
                let remote_commitment_tx = self.build_commitment_transaction(self.cur_remote_commitment_transaction_number, &remote_keys, false, true);
                let remote_commitment_txid = remote_commitment_tx.0.txid();
@@ -1934,7 +2224,7 @@ impl Channel {
                let mut htlc_sigs = Vec::new();
 
                for ref htlc in remote_commitment_tx.1.iter() {
-                       let htlc_tx = self.build_htlc_transaction(&remote_commitment_txid, htlc, false, &remote_keys)?;
+                       let htlc_tx = self.build_htlc_transaction(&remote_commitment_txid, htlc, false, &remote_keys);
                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &remote_keys);
                        let htlc_sighash = Message::from_slice(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]).unwrap();
                        let our_htlc_key = secp_derived_key!(chan_utils::derive_private_key(&self.secp_ctx, &remote_keys.per_commitment_point, &self.local_keys.htlc_base_key));
@@ -1958,7 +2248,7 @@ impl Channel {
        pub fn send_htlc_and_commit(&mut self, amount_msat: u64, payment_hash: [u8; 32], cltv_expiry: u32, onion_routing_packet: msgs::OnionPacket) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned)>, HandleError> {
                match self.send_htlc(amount_msat, payment_hash, cltv_expiry, onion_routing_packet)? {
                        Some(update_add_htlc) =>
-                               Ok(Some((update_add_htlc, self.send_commitment()?))),
+                               Ok(Some((update_add_htlc, self.send_commitment_no_status_check()?))),
                        None => Ok(None)
                }
        }
@@ -1988,10 +2278,16 @@ impl Channel {
                // We can't send our shutdown until we've committed all of our pending HTLCs, but the
                // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
                // cell HTLCs and return them to fail the payment.
-               let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlcs.len());
-               for htlc in self.holding_cell_htlcs.drain(..) {
-                       dropped_outbound_htlcs.push(htlc.payment_hash);
-               }
+               let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+               self.holding_cell_htlc_updates.retain(|htlc_update| {
+                       match htlc_update {
+                               &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, .. } => {
+                                       dropped_outbound_htlcs.push(payment_hash.clone());
+                                       false
+                               },
+                               _ => true
+                       }
+               });
 
                Ok((msgs::Shutdown {
                        channel_id: self.channel_id,
@@ -2089,7 +2385,7 @@ mod tests {
                                let remote_signature = Signature::from_der(&secp_ctx, &hex_bytes($their_sig_hex).unwrap()[..]).unwrap();
 
                                let ref htlc = unsigned_tx.1[$htlc_idx];
-                               let mut htlc_tx = chan.build_htlc_transaction(&unsigned_tx.0.txid(), &htlc, true, &keys).unwrap();
+                               let mut htlc_tx = chan.build_htlc_transaction(&unsigned_tx.0.txid(), &htlc, true, &keys);
                                let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys);
                                let htlc_sighash = Message::from_slice(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]).unwrap();
                                secp_ctx.verify(&htlc_sighash, &remote_signature, &keys.b_htlc_key).unwrap();
@@ -2134,6 +2430,8 @@ mod tests {
                                cltv_expiry: 500,
                                payment_hash: [0; 32],
                                state: HTLCState::Committed,
+                               fail_reason: None,
+                               local_removed_fulfilled: false,
                                pending_forward_state: None,
                        };
                        let mut sha = Sha256::new();
@@ -2149,6 +2447,8 @@ mod tests {
                                cltv_expiry: 501,
                                payment_hash: [0; 32],
                                state: HTLCState::Committed,
+                               fail_reason: None,
+                               local_removed_fulfilled: false,
                                pending_forward_state: None,
                        };
                        let mut sha = Sha256::new();
@@ -2164,6 +2464,8 @@ mod tests {
                                cltv_expiry: 502,
                                payment_hash: [0; 32],
                                state: HTLCState::Committed,
+                               fail_reason: None,
+                               local_removed_fulfilled: false,
                                pending_forward_state: None,
                        };
                        let mut sha = Sha256::new();
@@ -2179,6 +2481,8 @@ mod tests {
                                cltv_expiry: 503,
                                payment_hash: [0; 32],
                                state: HTLCState::Committed,
+                               fail_reason: None,
+                               local_removed_fulfilled: false,
                                pending_forward_state: None,
                        };
                        let mut sha = Sha256::new();
@@ -2194,6 +2498,8 @@ mod tests {
                                cltv_expiry: 504,
                                payment_hash: [0; 32],
                                state: HTLCState::Committed,
+                               fail_reason: None,
+                               local_removed_fulfilled: false,
                                pending_forward_state: None,
                        };
                        let mut sha = Sha256::new();
index d2854ca741145eaeaefc11ab673cbe53c71d3f2c..68cfed8fd0d422cce1f17550bdefde7d129dbae1 100644 (file)
@@ -32,29 +32,57 @@ use std::collections::hash_map;
 use std::{ptr, mem};
 use std::time::{Instant,Duration};
 
-/// Stores the info we will need to send when we want to forward an HTLC onwards
-pub struct PendingForwardHTLCInfo {
-       onion_packet: Option<msgs::OnionPacket>,
-       payment_hash: [u8; 32],
-       short_channel_id: u64,
-       prev_short_channel_id: u64,
-       amt_to_forward: u64,
-       outgoing_cltv_value: u32,
-}
+mod channel_held_info {
+       use ln::msgs;
 
-#[cfg(feature = "fuzztarget")]
-impl PendingForwardHTLCInfo {
-       pub fn dummy() -> Self {
-               Self {
-                       onion_packet: None,
-                       payment_hash: [0; 32],
-                       short_channel_id: 0,
-                       prev_short_channel_id: 0,
-                       amt_to_forward: 0,
-                       outgoing_cltv_value: 0,
+       /// Stores the info we will need to send when we want to forward an HTLC onwards
+       pub struct PendingForwardHTLCInfo {
+               pub(super) onion_packet: Option<msgs::OnionPacket>,
+               pub(super) payment_hash: [u8; 32],
+               pub(super) short_channel_id: u64,
+               pub(super) prev_short_channel_id: u64,
+               pub(super) amt_to_forward: u64,
+               pub(super) outgoing_cltv_value: u32,
+       }
+
+       #[cfg(feature = "fuzztarget")]
+       impl PendingForwardHTLCInfo {
+               pub fn dummy() -> Self {
+                       Self {
+                               onion_packet: None,
+                               payment_hash: [0; 32],
+                               short_channel_id: 0,
+                               prev_short_channel_id: 0,
+                               amt_to_forward: 0,
+                               outgoing_cltv_value: 0,
+                       }
+               }
+       }
+
+       #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+       pub enum HTLCFailReason {
+               ErrorPacket {
+                       err: msgs::OnionErrorPacket,
+               },
+               Reason {
+                       failure_code: u16,
+                       data: Vec<u8>,
+               }
+       }
+
+       #[cfg(feature = "fuzztarget")]
+       impl HTLCFailReason {
+               pub fn dummy() -> Self {
+                       HTLCFailReason::Reason {
+                               failure_code: 0, data: Vec::new(),
+                       }
                }
        }
 }
+#[cfg(feature = "fuzztarget")]
+pub use self::channel_held_info::*;
+#[cfg(not(feature = "fuzztarget"))]
+pub(crate) use self::channel_held_info::*;
 
 enum PendingOutboundHTLC {
        IntermediaryHopData {
@@ -72,16 +100,6 @@ enum PendingOutboundHTLC {
        }
 }
 
-enum HTLCFailReason<'a> {
-       ErrorPacket {
-               err: &'a msgs::OnionErrorPacket,
-       },
-       Reason {
-               failure_code: u16,
-               data: &'a[u8],
-       }
-}
-
 /// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
 /// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
 /// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
@@ -257,7 +275,7 @@ impl ChannelManager {
                };
                for payment_hash in res.1 {
                        // unknown_next_peer...I dunno who that is anymore....
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: &[0; 0] });
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
                }
                Ok(res.0)
        }
@@ -289,6 +307,7 @@ impl ChannelManager {
                res
        }
 
+       #[inline]
        fn gen_ammag_from_shared_secret(shared_secret: &SharedSecret) -> [u8; 32] {
                let mut hmac = Hmac::new(Sha256::new(), &[0x61, 0x6d, 0x6d, 0x61, 0x67]); // ammag
                hmac.input(&shared_secret[..]);
@@ -487,6 +506,7 @@ impl ChannelManager {
                packet
        }
 
+       #[inline]
        fn build_first_hop_failure_packet(shared_secret: &SharedSecret, failure_type: u16, failure_data: &[u8]) -> msgs::OnionErrorPacket {
                let failure_packet = ChannelManager::build_failure_packet(shared_secret, failure_type, failure_data);
                ChannelManager::encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
@@ -706,8 +726,8 @@ impl ChannelManager {
 
                for failed_forward in failed_forwards.drain(..) {
                        match failed_forward.2 {
-                               None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: &[0;0] }),
-                               Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: &chan_update.encode()[..] }),
+                               None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: Vec::new() }),
+                               Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: chan_update.encode() }),
                        };
                }
 
@@ -722,7 +742,7 @@ impl ChannelManager {
 
        /// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event.
        pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> bool {
-               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: &[0;0] })
+               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: Vec::new() })
        }
 
        fn fail_htlc_backwards_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, payment_hash: &[u8; 32], onion_error: HTLCFailReason) -> bool {
@@ -754,7 +774,7 @@ impl ChannelManager {
                        PendingOutboundHTLC::IntermediaryHopData { source_short_channel_id, incoming_packet_shared_secret } => {
                                let err_packet = match onion_error {
                                        HTLCFailReason::Reason { failure_code, data } => {
-                                               let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, data).encode();
+                                               let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
                                                ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
                                        },
                                        HTLCFailReason::ErrorPacket { err } => {
@@ -762,14 +782,14 @@ impl ChannelManager {
                                        }
                                };
 
-                               let (node_id, fail_msg) = {
+                               let (node_id, fail_msgs) = {
                                        let chan_id = match channel_state.short_to_id.get(&source_short_channel_id) {
                                                Some(chan_id) => chan_id.clone(),
                                                None => return false
                                        };
 
                                        let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-                                       match chan.get_update_fail_htlc(payment_hash, err_packet) {
+                                       match chan.get_update_fail_htlc_and_commit(payment_hash, err_packet) {
                                                Ok(msg) => (chan.get_their_node_id(), msg),
                                                Err(_e) => {
                                                        //TODO: Do something with e?
@@ -778,12 +798,18 @@ impl ChannelManager {
                                        }
                                };
 
-                               mem::drop(channel_state);
-                               let mut pending_events = self.pending_events.lock().unwrap();
-                               pending_events.push(events::Event::SendFailHTLC {
-                                       node_id,
-                                       msg: fail_msg
-                               });
+                               match fail_msgs {
+                                       Some(msgs) => {
+                                               mem::drop(channel_state);
+                                               let mut pending_events = self.pending_events.lock().unwrap();
+                                               pending_events.push(events::Event::SendFailHTLC {
+                                                       node_id,
+                                                       msg: msgs.0,
+                                                       commitment_msg: msgs.1,
+                                               });
+                                       },
+                                       None => {},
+                               }
 
                                true
                        },
@@ -793,6 +819,7 @@ impl ChannelManager {
        /// Provides a payment preimage in response to a PaymentReceived event, returning true and
        /// generating message events for the net layer to claim the payment, if possible. Thus, you
        /// should probably kick the net layer to go send messages if this returns true!
+       /// May panic if called except in response to a PaymentReceived event.
        pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool {
                self.claim_funds_internal(payment_preimage, true)
        }
@@ -838,15 +865,15 @@ impl ChannelManager {
                                false
                        },
                        PendingOutboundHTLC::IntermediaryHopData { source_short_channel_id, .. } => {
-                               let (node_id, fulfill_msg) = {
+                               let (node_id, fulfill_msgs, monitor) = {
                                        let chan_id = match channel_state.short_to_id.get(&source_short_channel_id) {
                                                Some(chan_id) => chan_id.clone(),
                                                None => return false
                                        };
 
                                        let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-                                       match chan.get_update_fulfill_htlc(payment_preimage) {
-                                               Ok(msg) => (chan.get_their_node_id(), msg),
+                                       match chan.get_update_fulfill_htlc_and_commit(payment_preimage) {
+                                               Ok(msg) => (chan.get_their_node_id(), msg, if from_user { Some(chan.channel_monitor()) } else { None }),
                                                Err(_e) => {
                                                        //TODO: Do something with e?
                                                        return false;
@@ -855,13 +882,26 @@ impl ChannelManager {
                                };
 
                                mem::drop(channel_state);
-                               let mut pending_events = self.pending_events.lock().unwrap();
-                               pending_events.push(events::Event::SendFulfillHTLC {
-                                       node_id: node_id,
-                                       msg: fulfill_msg
-                               });
+                               match fulfill_msgs {
+                                       Some(msgs) => {
+                                               let mut pending_events = self.pending_events.lock().unwrap();
+                                               pending_events.push(events::Event::SendFulfillHTLC {
+                                                       node_id: node_id,
+                                                       msg: msgs.0,
+                                                       commitment_msg: msgs.1,
+                                               });
+                                       },
+                                       None => {},
+                               }
 
-                               true
+                               //TODO: It may not be possible to handle add_update_monitor fails gracefully, maybe
+                               //it should return no Err? Sadly, panic!()s instead doesn't help much :(
+                               if from_user {
+                                       match self.monitor.add_update_monitor(monitor.as_ref().unwrap().get_funding_txo().unwrap(), monitor.unwrap()) {
+                                               Ok(()) => true,
+                                               Err(_) => true,
+                                       }
+                               } else { true }
                        },
                }
        }
@@ -1057,7 +1097,7 @@ impl ChannelMessageHandler for ChannelManager {
                };
                for payment_hash in res.2 {
                        // unknown_next_peer...I dunno who that is anymore....
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: &[0; 0] });
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
                }
                Ok((res.0, res.1))
        }
@@ -1319,7 +1359,12 @@ impl ChannelMessageHandler for ChannelManager {
        }
 
        fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
-               {
+               //TODO: Delay the claimed_funds relaying just like we do outbound relay!
+               // Claim funds first, cause we don't really care if the channel we received the message on
+               // is broken, we may have enough info to get our own money!
+               self.claim_funds_internal(msg.payment_preimage.clone(), false);
+
+               let monitor = {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        match channel_state.by_id.get_mut(&msg.channel_id) {
                                Some(chan) => {
@@ -1327,51 +1372,45 @@ impl ChannelMessageHandler for ChannelManager {
                                                return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
                                        }
                                        chan.update_fulfill_htlc(&msg)?;
+                                       chan.channel_monitor()
                                },
                                None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
                        }
-               }
-               //TODO: Delay the claimed_funds relaying just like we do outbound relay!
-               self.claim_funds_internal(msg.payment_preimage.clone(), false);
+               };
+               self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
                Ok(())
        }
 
        fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
                let mut channel_state = self.channel_state.lock().unwrap();
-               let payment_hash = match channel_state.by_id.get_mut(&msg.channel_id) {
+               match channel_state.by_id.get_mut(&msg.channel_id) {
                        Some(chan) => {
                                if chan.get_their_node_id() != *their_node_id {
                                        return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
                                }
-                               chan.update_fail_htlc(&msg)?
+                               chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() })
                        },
                        None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
-               };
-               self.fail_htlc_backwards_internal(channel_state, &payment_hash, HTLCFailReason::ErrorPacket { err: &msg.reason });
-               Ok(())
+               }
        }
 
        fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
                let mut channel_state = self.channel_state.lock().unwrap();
-               let payment_hash = match channel_state.by_id.get_mut(&msg.channel_id) {
+               match channel_state.by_id.get_mut(&msg.channel_id) {
                        Some(chan) => {
                                if chan.get_their_node_id() != *their_node_id {
                                        return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
                                }
-                               chan.update_fail_malformed_htlc(&msg)?
+                               chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() })
                        },
                        None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
-               };
-               self.fail_htlc_backwards_internal(channel_state, &payment_hash, HTLCFailReason::Reason { failure_code: msg.failure_code, data: &[0;0] });
-               Ok(())
+               }
        }
 
-       fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<msgs::RevokeAndACK, HandleError> {
-               let mut forward_event = None;
+       fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), HandleError> {
                let (res, monitor) = {
                        let mut channel_state = self.channel_state.lock().unwrap();
-
-                       let ((res, mut forwarding_infos), monitor) = match channel_state.by_id.get_mut(&msg.channel_id) {
+                       match channel_state.by_id.get_mut(&msg.channel_id) {
                                Some(chan) => {
                                        if chan.get_their_node_id() != *their_node_id {
                                                return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
@@ -1379,13 +1418,40 @@ impl ChannelMessageHandler for ChannelManager {
                                        (chan.commitment_signed(&msg)?, chan.channel_monitor())
                                },
                                None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
-                       };
+                       }
+               };
+               //TODO: Only if we store HTLC sigs
+               self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
+
+               Ok(res)
+       }
 
+       fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, HandleError> {
+               let ((res, mut pending_forwards, mut pending_failures), monitor) = {
+                       let mut channel_state = self.channel_state.lock().unwrap();
+                       match channel_state.by_id.get_mut(&msg.channel_id) {
+                               Some(chan) => {
+                                       if chan.get_their_node_id() != *their_node_id {
+                                               return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
+                                       }
+                                       (chan.revoke_and_ack(&msg)?, chan.channel_monitor())
+                               },
+                               None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
+                       }
+               };
+               self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
+               for failure in pending_failures.drain(..) {
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failure.0, failure.1);
+               }
+
+               let mut forward_event = None;
+               if !pending_forwards.is_empty() {
+                       let mut channel_state = self.channel_state.lock().unwrap();
                        if channel_state.forward_htlcs.is_empty() {
                                forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
                                channel_state.next_forward = forward_event.unwrap();
                        }
-                       for forward_info in forwarding_infos.drain(..) {
+                       for forward_info in pending_forwards.drain(..) {
                                match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
                                        hash_map::Entry::Occupied(mut entry) => {
                                                entry.get_mut().push(forward_info);
@@ -1395,12 +1461,7 @@ impl ChannelMessageHandler for ChannelManager {
                                        }
                                }
                        }
-
-                       (res, monitor)
-               };
-               //TODO: Only if we store HTLC sigs
-               self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
-
+               }
                match forward_event {
                        Some(time) => {
                                let mut pending_events = self.pending_events.lock().unwrap();
@@ -1414,23 +1475,6 @@ impl ChannelMessageHandler for ChannelManager {
                Ok(res)
        }
 
-       fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<(Vec<msgs::UpdateAddHTLC>, msgs::CommitmentSigned)>, HandleError> {
-               let (res, monitor) = {
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       match channel_state.by_id.get_mut(&msg.channel_id) {
-                               Some(chan) => {
-                                       if chan.get_their_node_id() != *their_node_id {
-                                               return Err(HandleError{err: "Got a message for a channel from the wrong node!", msg: None})
-                                       }
-                                       (chan.revoke_and_ack(&msg)?, chan.channel_monitor())
-                               },
-                               None => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
-                       }
-               };
-               self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor)?;
-               Ok(res)
-       }
-
        fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
                let mut channel_state = self.channel_state.lock().unwrap();
                match channel_state.by_id.get_mut(&msg.channel_id) {
@@ -1541,8 +1585,9 @@ mod tests {
 
        use rand::{thread_rng,Rng};
 
-       use std::sync::{Arc, Mutex};
+       use std::collections::HashMap;
        use std::default::Default;
+       use std::sync::{Arc, Mutex};
        use std::time::Instant;
 
        fn build_test_onion_keys() -> Vec<OnionKeys> {
@@ -1695,27 +1740,36 @@ mod tests {
                assert_eq!(onion_packet_5.data, hex_bytes("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap());
        }
 
-       static mut CHAN_COUNT: u16 = 0;
-       fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction) {
+       fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) {
                let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               let chan_id = unsafe { CHAN_COUNT };
-               chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id as u32; 1]);
+               chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]);
                for i in 2..100 {
                        header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                        chain.block_connected_checked(&header, i, &[tx; 0], &[0; 0]);
                }
        }
 
-       fn create_chan_between_nodes(node_a: &ChannelManager, chain_a: &chaininterface::ChainWatchInterfaceUtil, node_b: &ChannelManager, chain_b: &chaininterface::ChainWatchInterfaceUtil) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, Uint256) {
-               let open_chan = node_a.create_channel(node_b.get_our_node_id(), 100000, 42).unwrap();
-               let accept_chan = node_b.handle_open_channel(&node_a.get_our_node_id(), &open_chan).unwrap();
-               node_a.handle_accept_channel(&node_b.get_our_node_id(), &accept_chan).unwrap();
+       struct Node {
+               feeest: Arc<test_utils::TestFeeEstimator>,
+               chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
+               tx_broadcaster: Arc<test_utils::TestBroadcaster>,
+               chan_monitor: Arc<test_utils::TestChannelMonitor>,
+               node_id: SecretKey,
+               node: Arc<ChannelManager>,
+               router: Router,
+       }
+
+       static mut CHAN_COUNT: u32 = 0;
+       fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, Uint256, Transaction) {
+               let open_chan = node_a.node.create_channel(node_b.node.get_our_node_id(), 100000, 42).unwrap();
+               let accept_chan = node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), &open_chan).unwrap();
+               node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_chan).unwrap();
 
                let chan_id = unsafe { CHAN_COUNT };
                let tx;
                let funding_output;
 
-               let events_1 = node_a.get_and_clear_pending_events();
+               let events_1 = node_a.node.get_and_clear_pending_events();
                assert_eq!(events_1.len(), 1);
                match events_1[0] {
                        Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
@@ -1727,26 +1781,33 @@ mod tests {
                                }]};
                                funding_output = (Sha256dHash::from_data(&serialize(&tx).unwrap()[..]), 0);
 
-                               node_a.funding_transaction_generated(&temporary_channel_id, funding_output.clone());
-                               //TODO: Check that we got added to chan_monitor_a!
+                               node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output.clone());
+                               let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+                               assert_eq!(added_monitors.len(), 1);
+                               assert_eq!(added_monitors[0].0, funding_output);
+                               added_monitors.clear();
                        },
                        _ => panic!("Unexpected event"),
                }
 
-               let events_2 = node_a.get_and_clear_pending_events();
+               let events_2 = node_a.node.get_and_clear_pending_events();
                assert_eq!(events_2.len(), 1);
                let funding_signed = match events_2[0] {
                        Event::SendFundingCreated { ref node_id, ref msg } => {
-                               assert_eq!(*node_id, node_b.get_our_node_id());
-                               node_b.handle_funding_created(&node_a.get_our_node_id(), msg).unwrap()
-                               //TODO: Check that we got added to chan_monitor_b!
+                               assert_eq!(*node_id, node_b.node.get_our_node_id());
+                               let res = node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), msg).unwrap();
+                               let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
+                               assert_eq!(added_monitors.len(), 1);
+                               assert_eq!(added_monitors[0].0, funding_output);
+                               added_monitors.clear();
+                               res
                        },
                        _ => panic!("Unexpected event"),
                };
 
-               node_a.handle_funding_signed(&node_b.get_our_node_id(), &funding_signed).unwrap();
+               node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &funding_signed).unwrap();
 
-               let events_3 = node_a.get_and_clear_pending_events();
+               let events_3 = node_a.node.get_and_clear_pending_events();
                assert_eq!(events_3.len(), 1);
                match events_3[0] {
                        Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
@@ -1756,35 +1817,35 @@ mod tests {
                        _ => panic!("Unexpected event"),
                };
 
-               confirm_transaction(&chain_a, &tx);
-               let events_4 = node_a.get_and_clear_pending_events();
+               confirm_transaction(&node_a.chain_monitor, &tx, chan_id);
+               let events_4 = node_a.node.get_and_clear_pending_events();
                assert_eq!(events_4.len(), 1);
                match events_4[0] {
                        Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
-                               assert_eq!(*node_id, node_b.get_our_node_id());
+                               assert_eq!(*node_id, node_b.node.get_our_node_id());
                                assert!(announcement_sigs.is_none());
-                               node_b.handle_funding_locked(&node_a.get_our_node_id(), msg).unwrap()
+                               node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), msg).unwrap()
                        },
                        _ => panic!("Unexpected event"),
                };
 
                let channel_id;
 
-               confirm_transaction(&chain_b, &tx);
-               let events_5 = node_b.get_and_clear_pending_events();
+               confirm_transaction(&node_b.chain_monitor, &tx, chan_id);
+               let events_5 = node_b.node.get_and_clear_pending_events();
                assert_eq!(events_5.len(), 1);
                let as_announcement_sigs = match events_5[0] {
                        Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
-                               assert_eq!(*node_id, node_a.get_our_node_id());
+                               assert_eq!(*node_id, node_a.node.get_our_node_id());
                                channel_id = msg.channel_id.clone();
-                               let as_announcement_sigs = node_a.handle_funding_locked(&node_b.get_our_node_id(), msg).unwrap().unwrap();
-                               node_a.handle_announcement_signatures(&node_b.get_our_node_id(), &(*announcement_sigs).clone().unwrap()).unwrap();
+                               let as_announcement_sigs = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap().unwrap();
+                               node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &(*announcement_sigs).clone().unwrap()).unwrap();
                                as_announcement_sigs
                        },
                        _ => panic!("Unexpected event"),
                };
 
-               let events_6 = node_a.get_and_clear_pending_events();
+               let events_6 = node_a.node.get_and_clear_pending_events();
                assert_eq!(events_6.len(), 1);
                let (announcement, as_update) = match events_6[0] {
                        Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
@@ -1793,8 +1854,8 @@ mod tests {
                        _ => panic!("Unexpected event"),
                };
 
-               node_b.handle_announcement_signatures(&node_a.get_our_node_id(), &as_announcement_sigs).unwrap();
-               let events_7 = node_b.get_and_clear_pending_events();
+               node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_announcement_sigs).unwrap();
+               let events_7 = node_b.node.get_and_clear_pending_events();
                assert_eq!(events_7.len(), 1);
                let bs_update = match events_7[0] {
                        Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
@@ -1808,12 +1869,22 @@ mod tests {
                        CHAN_COUNT += 1;
                }
 
-               ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone(), channel_id)
+               ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone(), channel_id, tx)
        }
 
-       fn close_channel(outbound_node: &ChannelManager, outbound_broadcaster: &test_utils::TestBroadcaster, inbound_node: &ChannelManager, inbound_broadcaster: &test_utils::TestBroadcaster, channel_id: &Uint256, close_inbound_first: bool) {
-               let (node_a, broadcaster_a) = if close_inbound_first { (inbound_node, inbound_broadcaster) } else { (outbound_node, outbound_broadcaster) };
-               let (node_b, broadcaster_b) = if close_inbound_first { (outbound_node, outbound_broadcaster) } else { (inbound_node, inbound_broadcaster) };
+       fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Uint256, Transaction) {
+               let chan_announcement = create_chan_between_nodes(&nodes[a], &nodes[b]);
+               for node in nodes {
+                       assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
+                       node.router.handle_channel_update(&chan_announcement.1).unwrap();
+                       node.router.handle_channel_update(&chan_announcement.2).unwrap();
+               }
+               (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
+       }
+
+       fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &Uint256, funding_tx: Transaction, close_inbound_first: bool) {
+               let (node_a, broadcaster_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster) } else { (&outbound_node.node, &outbound_node.tx_broadcaster) };
+               let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
                let (tx_a, tx_b);
 
                let shutdown_a = node_a.close_channel(channel_id).unwrap();
@@ -1844,6 +1915,9 @@ mod tests {
                        tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
                }
                assert_eq!(tx_a, tx_b);
+               let mut funding_tx_map = HashMap::new();
+               funding_tx_map.insert(funding_tx.txid(), funding_tx);
+               tx_a.verify(&funding_tx_map).unwrap();
        }
 
        struct SendEvent {
@@ -1863,7 +1937,7 @@ mod tests {
        }
 
        static mut PAYMENT_COUNT: u8 = 0;
-       fn send_along_route(origin_node: &ChannelManager, route: Route, expected_route: &[&ChannelManager], recv_value: u64) -> ([u8; 32], [u8; 32]) {
+       fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
                let our_payment_preimage = unsafe { [PAYMENT_COUNT; 32] };
                unsafe { PAYMENT_COUNT += 1 };
                let our_payment_hash = {
@@ -1875,33 +1949,56 @@ mod tests {
                };
 
                let mut payment_event = {
-                       let msgs = origin_node.send_payment(route, our_payment_hash).unwrap().unwrap();
+                       let msgs = origin_node.node.send_payment(route, our_payment_hash).unwrap().unwrap();
                        SendEvent {
-                               node_id: expected_route[0].get_our_node_id(),
+                               node_id: expected_route[0].node.get_our_node_id(),
                                msgs: vec!(msgs.0),
                                commitment_msg: msgs.1,
                        }
                };
                let mut prev_node = origin_node;
 
-               for (idx, node) in expected_route.iter().enumerate() {
-                       assert_eq!(node.get_our_node_id(), payment_event.node_id);
+               for (idx, &node) in expected_route.iter().enumerate() {
+                       assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
 
-                       node.handle_update_add_htlc(&prev_node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-                       let revoke_and_ack = node.handle_commitment_signed(&prev_node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
-                       assert!(prev_node.handle_revoke_and_ack(&node.get_our_node_id(), &revoke_and_ack).unwrap().is_none());
+                       node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+                       {
+                               let added_monitors = node.chan_monitor.added_monitors.lock().unwrap();
+                               assert_eq!(added_monitors.len(), 0);
+                       }
+
+                       let revoke_and_ack = node.node.handle_commitment_signed(&prev_node.node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+                       {
+                               let mut added_monitors = node.chan_monitor.added_monitors.lock().unwrap();
+                               assert_eq!(added_monitors.len(), 1);
+                               added_monitors.clear();
+                       }
+                       assert!(prev_node.node.handle_revoke_and_ack(&node.node.get_our_node_id(), &revoke_and_ack.0).unwrap().is_none());
+                       let prev_revoke_and_ack = prev_node.node.handle_commitment_signed(&node.node.get_our_node_id(), &revoke_and_ack.1.unwrap()).unwrap();
+                       {
+                               let mut added_monitors = prev_node.chan_monitor.added_monitors.lock().unwrap();
+                               assert_eq!(added_monitors.len(), 2);
+                               added_monitors.clear();
+                       }
+                       assert!(node.node.handle_revoke_and_ack(&prev_node.node.get_our_node_id(), &prev_revoke_and_ack.0).unwrap().is_none());
+                       assert!(prev_revoke_and_ack.1.is_none());
+                       {
+                               let mut added_monitors = node.chan_monitor.added_monitors.lock().unwrap();
+                               assert_eq!(added_monitors.len(), 1);
+                               added_monitors.clear();
+                       }
 
-                       let events_1 = node.get_and_clear_pending_events();
+                       let events_1 = node.node.get_and_clear_pending_events();
                        assert_eq!(events_1.len(), 1);
                        match events_1[0] {
                                Event::PendingHTLCsForwardable { .. } => { },
                                _ => panic!("Unexpected event"),
                        };
 
-                       node.channel_state.lock().unwrap().next_forward = Instant::now();
-                       node.process_pending_htlc_forward();
+                       node.node.channel_state.lock().unwrap().next_forward = Instant::now();
+                       node.node.process_pending_htlc_forward();
 
-                       let mut events_2 = node.get_and_clear_pending_events();
+                       let mut events_2 = node.node.get_and_clear_pending_events();
                        assert_eq!(events_2.len(), 1);
                        if idx == expected_route.len() - 1 {
                                match events_2[0] {
@@ -1924,26 +2021,57 @@ mod tests {
                (our_payment_preimage, our_payment_hash)
        }
 
-       fn claim_payment(origin_node: &ChannelManager, expected_route: &[&ChannelManager], our_payment_preimage: [u8; 32]) {
-               assert!(expected_route.last().unwrap().claim_funds(our_payment_preimage));
+       fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) {
+               assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage));
+               {
+                       let mut added_monitors = expected_route.last().unwrap().chan_monitor.added_monitors.lock().unwrap();
+                       assert_eq!(added_monitors.len(), 1);
+                       added_monitors.clear();
+               }
+
+               let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None;
+               macro_rules! update_fulfill_dance {
+                       ($node: expr, $prev_node: expr) => {
+                               {
+                                       $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
+                                       let revoke_and_commit = $node.node.handle_commitment_signed(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().1).unwrap();
+                                       {
+                                               let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+                                               assert_eq!(added_monitors.len(), 2);
+                                               added_monitors.clear();
+                                       }
+                                       assert!($prev_node.node.handle_revoke_and_ack(&$node.node.get_our_node_id(), &revoke_and_commit.0).unwrap().is_none());
+                                       let revoke_and_ack = $prev_node.node.handle_commitment_signed(&$node.node.get_our_node_id(), &revoke_and_commit.1.unwrap()).unwrap();
+                                       assert!(revoke_and_ack.1.is_none());
+                                       {
+                                               let mut added_monitors = $prev_node.chan_monitor.added_monitors.lock().unwrap();
+                                               assert_eq!(added_monitors.len(), 2);
+                                               added_monitors.clear();
+                                       }
+                                       assert!($node.node.handle_revoke_and_ack(&$prev_node.node.get_our_node_id(), &revoke_and_ack.0).unwrap().is_none());
+                                       {
+                                               let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+                                               assert_eq!(added_monitors.len(), 1);
+                                               added_monitors.clear();
+                                       }
+                               }
+                       }
+               }
 
-               let mut expected_next_node = expected_route.last().unwrap().get_our_node_id();
+               let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
                let mut prev_node = expected_route.last().unwrap();
-               let mut next_msg = None;
                for node in expected_route.iter().rev() {
-                       assert_eq!(expected_next_node, node.get_our_node_id());
-                       match next_msg {
-                               Some(msg) => {
-                                       node.handle_update_fulfill_htlc(&prev_node.get_our_node_id(), &msg).unwrap();
-                               }, None => {}
+                       assert_eq!(expected_next_node, node.node.get_our_node_id());
+                       if next_msgs.is_some() {
+                               update_fulfill_dance!(node, prev_node);
                        }
 
-                       let events = node.get_and_clear_pending_events();
+                       let events = node.node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 1);
                        match events[0] {
-                               Event::SendFulfillHTLC { ref node_id, ref msg } => {
+                               Event::SendFulfillHTLC { ref node_id, ref msg, ref commitment_msg } => {
                                        expected_next_node = node_id.clone();
-                                       next_msg = Some(msg.clone());
+                                       next_msgs = Some((msg.clone(), commitment_msg.clone()));
                                },
                                _ => panic!("Unexpected event"),
                        };
@@ -1951,10 +2079,10 @@ mod tests {
                        prev_node = node;
                }
 
-               assert_eq!(expected_next_node, origin_node.get_our_node_id());
-               origin_node.handle_update_fulfill_htlc(&expected_route.first().unwrap().get_our_node_id(), &next_msg.unwrap()).unwrap();
+               assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
+               update_fulfill_dance!(origin_node, expected_route.first().unwrap());
 
-               let events = origin_node.get_and_clear_pending_events();
+               let events = origin_node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
                        Event::PaymentSent { payment_preimage } => {
@@ -1964,21 +2092,21 @@ mod tests {
                }
        }
 
-       fn route_payment(origin_node: &ChannelManager, origin_router: &Router, expected_route: &[&ChannelManager], recv_value: u64) -> ([u8; 32], [u8; 32]) {
-               let route = origin_router.get_route(&expected_route.last().unwrap().get_our_node_id(), &Vec::new(), recv_value, 142).unwrap();
+       fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
+               let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), &Vec::new(), recv_value, 142).unwrap();
                assert_eq!(route.hops.len(), expected_route.len());
                for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
-                       assert_eq!(hop.pubkey, node.get_our_node_id());
+                       assert_eq!(hop.pubkey, node.node.get_our_node_id());
                }
 
                send_along_route(origin_node, route, expected_route, recv_value)
        }
 
-       fn route_over_limit(origin_node: &ChannelManager, origin_router: &Router, expected_route: &[&ChannelManager], recv_value: u64) {
-               let route = origin_router.get_route(&expected_route.last().unwrap().get_our_node_id(), &Vec::new(), recv_value, 142).unwrap();
+       fn route_over_limit(origin_node: &Node, expected_route: &[&Node], recv_value: u64) {
+               let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), &Vec::new(), recv_value, 142).unwrap();
                assert_eq!(route.hops.len(), expected_route.len());
                for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
-                       assert_eq!(hop.pubkey, node.get_our_node_id());
+                       assert_eq!(hop.pubkey, node.node.get_our_node_id());
                }
 
                let our_payment_preimage = unsafe { [PAYMENT_COUNT; 32] };
@@ -1991,42 +2119,68 @@ mod tests {
                        ret
                };
 
-               let err = origin_node.send_payment(route, our_payment_hash).err().unwrap();
+               let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
                assert_eq!(err.err, "Cannot send value that would put us over our max HTLC value in flight");
        }
 
-       fn send_payment(origin_node: &ChannelManager, origin_router: &Router, expected_route: &[&ChannelManager], recv_value: u64) {
-               let our_payment_preimage = route_payment(origin_node, origin_router, expected_route, recv_value).0;
-               claim_payment(origin_node, expected_route, our_payment_preimage);
+       fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) {
+               let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
+               claim_payment(&origin, expected_route, our_payment_preimage);
        }
 
-       fn send_failed_payment(origin_node: &ChannelManager, origin_router: &Router, expected_route: &[&ChannelManager]) {
-               let route = origin_router.get_route(&expected_route.last().unwrap().get_our_node_id(), &Vec::new(), 1000000, 142).unwrap();
+       fn send_failed_payment(origin_node: &Node, expected_route: &[&Node]) {
+               let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), &Vec::new(), 1000000, 142).unwrap();
                assert_eq!(route.hops.len(), expected_route.len());
                for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
-                       assert_eq!(hop.pubkey, node.get_our_node_id());
+                       assert_eq!(hop.pubkey, node.node.get_our_node_id());
                }
                let our_payment_hash = send_along_route(origin_node, route, expected_route, 1000000).1;
 
-               assert!(expected_route.last().unwrap().fail_htlc_backwards(&our_payment_hash));
+               assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
 
-               let mut expected_next_node = expected_route.last().unwrap().get_our_node_id();
+               let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
+               macro_rules! update_fail_dance {
+                       ($node: expr, $prev_node: expr) => {
+                               {
+                                       $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
+                                       let revoke_and_commit = $node.node.handle_commitment_signed(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().1).unwrap();
+                                       {
+                                               let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+                                               assert_eq!(added_monitors.len(), 1);
+                                               added_monitors.clear();
+                                       }
+                                       assert!($prev_node.node.handle_revoke_and_ack(&$node.node.get_our_node_id(), &revoke_and_commit.0).unwrap().is_none());
+                                       let revoke_and_ack = $prev_node.node.handle_commitment_signed(&$node.node.get_our_node_id(), &revoke_and_commit.1.unwrap()).unwrap();
+                                       assert!(revoke_and_ack.1.is_none());
+                                       {
+                                               let mut added_monitors = $prev_node.chan_monitor.added_monitors.lock().unwrap();
+                                               assert_eq!(added_monitors.len(), 2);
+                                               added_monitors.clear();
+                                       }
+                                       assert!($node.node.handle_revoke_and_ack(&$prev_node.node.get_our_node_id(), &revoke_and_ack.0).unwrap().is_none());
+                                       {
+                                               let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+                                               assert_eq!(added_monitors.len(), 1);
+                                               added_monitors.clear();
+                                       }
+                               }
+                       }
+               }
+
+               let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
                let mut prev_node = expected_route.last().unwrap();
-               let mut next_msg = None;
                for node in expected_route.iter().rev() {
-                       assert_eq!(expected_next_node, node.get_our_node_id());
-                       match next_msg {
-                               Some(msg) => {
-                                       node.handle_update_fail_htlc(&prev_node.get_our_node_id(), &msg).unwrap();
-                               }, None => {}
+                       assert_eq!(expected_next_node, node.node.get_our_node_id());
+                       if next_msgs.is_some() {
+                               update_fail_dance!(node, prev_node);
                        }
 
-                       let events = node.get_and_clear_pending_events();
+                       let events = node.node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 1);
                        match events[0] {
-                               Event::SendFailHTLC { ref node_id, ref msg } => {
+                               Event::SendFailHTLC { ref node_id, ref msg, ref commitment_msg } => {
                                        expected_next_node = node_id.clone();
-                                       next_msg = Some(msg.clone());
+                                       next_msgs = Some((msg.clone(), commitment_msg.clone()));
                                },
                                _ => panic!("Unexpected event"),
                        };
@@ -2034,10 +2188,10 @@ mod tests {
                        prev_node = node;
                }
 
-               assert_eq!(expected_next_node, origin_node.get_our_node_id());
-               origin_node.handle_update_fail_htlc(&expected_route.first().unwrap().get_our_node_id(), &next_msg.unwrap()).unwrap();
+               assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
+               update_fail_dance!(origin_node, expected_route.first().unwrap());
 
-               let events = origin_node.get_and_clear_pending_events();
+               let events = origin_node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
                        Event::PaymentFailed { payment_hash } => {
@@ -2047,193 +2201,143 @@ mod tests {
                }
        }
 
+       fn create_network(node_count: usize) -> Vec<Node> {
+               let mut nodes = Vec::new();
+               let mut rng = thread_rng();
+               let secp_ctx = Secp256k1::new();
+
+               for _ in 0..node_count {
+                       let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
+                       let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
+                       let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
+                       let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone()));
+                       let node_id = {
+                               let mut key_slice = [0; 32];
+                               rng.fill_bytes(&mut key_slice);
+                               SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
+                       };
+                       let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone()).unwrap();
+                       let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id).unwrap());
+                       nodes.push(Node { feeest, chain_monitor, tx_broadcaster, chan_monitor, node_id, node, router });
+               }
+
+               nodes
+       }
+
        #[test]
        fn fake_network_test() {
                // Simple test which builds a network of ChannelManagers, connects them to each other, and
                // tests that payments get routed and transactions broadcast in semi-reasonable ways.
-               let mut rng = thread_rng();
-               let secp_ctx = Secp256k1::new();
-
-               let feeest_1 = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
-               let chain_monitor_1 = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
-               let chan_monitor_1 = Arc::new(test_utils::TestChannelMonitor{});
-               let tx_broadcaster_1 = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
-               let node_id_1 = {
-                       let mut key_slice = [0; 32];
-                       rng.fill_bytes(&mut key_slice);
-                       SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
-               };
-               let node_1 = ChannelManager::new(node_id_1.clone(), 0, true, Network::Testnet, feeest_1.clone(), chan_monitor_1.clone(), chain_monitor_1.clone(), tx_broadcaster_1.clone()).unwrap();
-               let router_1 = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id_1).unwrap());
-
-               let feeest_2 = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
-               let chain_monitor_2 = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
-               let chan_monitor_2 = Arc::new(test_utils::TestChannelMonitor{});
-               let tx_broadcaster_2 = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
-               let node_id_2 = {
-                       let mut key_slice = [0; 32];
-                       rng.fill_bytes(&mut key_slice);
-                       SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
-               };
-               let node_2 = ChannelManager::new(node_id_2.clone(), 0, true, Network::Testnet, feeest_2.clone(), chan_monitor_2.clone(), chain_monitor_2.clone(), tx_broadcaster_2.clone()).unwrap();
-               let router_2 = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id_2).unwrap());
-
-               let feeest_3 = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
-               let chain_monitor_3 = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
-               let chan_monitor_3 = Arc::new(test_utils::TestChannelMonitor{});
-               let tx_broadcaster_3 = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
-               let node_id_3 = {
-                       let mut key_slice = [0; 32];
-                       rng.fill_bytes(&mut key_slice);
-                       SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
-               };
-               let node_3 = ChannelManager::new(node_id_3.clone(), 0, true, Network::Testnet, feeest_3.clone(), chan_monitor_3.clone(), chain_monitor_3.clone(), tx_broadcaster_3.clone()).unwrap();
-               let router_3 = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id_3).unwrap());
-
-               let feeest_4 = Arc::new(test_utils::TestFeeEstimator { sat_per_vbyte: 1 });
-               let chain_monitor_4 = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
-               let chan_monitor_4 = Arc::new(test_utils::TestChannelMonitor{});
-               let tx_broadcaster_4 = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
-               let node_id_4 = {
-                       let mut key_slice = [0; 32];
-                       rng.fill_bytes(&mut key_slice);
-                       SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
-               };
-               let node_4 = ChannelManager::new(node_id_4.clone(), 0, true, Network::Testnet, feeest_4.clone(), chan_monitor_4.clone(), chain_monitor_4.clone(), tx_broadcaster_4.clone()).unwrap();
-               let router_4 = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id_4).unwrap());
+               let nodes = create_network(4);
 
                // Create some initial channels
-               let chan_announcement_1 = create_chan_between_nodes(&node_1, &chain_monitor_1, &node_2, &chain_monitor_2);
-               for router in vec!(&router_1, &router_2, &router_3, &router_4) {
-                       assert!(router.handle_channel_announcement(&chan_announcement_1.0).unwrap());
-                       router.handle_channel_update(&chan_announcement_1.1).unwrap();
-                       router.handle_channel_update(&chan_announcement_1.2).unwrap();
-               }
-               let chan_announcement_2 = create_chan_between_nodes(&node_2, &chain_monitor_2, &node_3, &chain_monitor_3);
-               for router in vec!(&router_1, &router_2, &router_3, &router_4) {
-                       assert!(router.handle_channel_announcement(&chan_announcement_2.0).unwrap());
-                       router.handle_channel_update(&chan_announcement_2.1).unwrap();
-                       router.handle_channel_update(&chan_announcement_2.2).unwrap();
-               }
-               let chan_announcement_3 = create_chan_between_nodes(&node_3, &chain_monitor_3, &node_4, &chain_monitor_4);
-               for router in vec!(&router_1, &router_2, &router_3, &router_4) {
-                       assert!(router.handle_channel_announcement(&chan_announcement_3.0).unwrap());
-                       router.handle_channel_update(&chan_announcement_3.1).unwrap();
-                       router.handle_channel_update(&chan_announcement_3.2).unwrap();
-               }
+               let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+               let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+               let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
 
                // Rebalance the network a bit by relaying one payment through all the channels...
-               send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
-               send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
-               send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
-               send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
-               send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..], 8000000);
+               send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+               send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+               send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+               send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
 
                // Send some more payments
-               send_payment(&node_2, &router_2, &vec!(&*node_3, &*node_4)[..], 1000000);
-               send_payment(&node_4, &router_4, &vec!(&*node_3, &*node_2, &*node_1)[..], 1000000);
-               send_payment(&node_4, &router_4, &vec!(&*node_3, &*node_2)[..], 1000000);
+               send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
+               send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
+               send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
 
                // Test failure packets
-               send_failed_payment(&node_1, &router_1, &vec!(&*node_2, &*node_3, &*node_4)[..]);
+               send_failed_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..]);
 
                // Add a new channel that skips 3
-               let chan_announcement_4 = create_chan_between_nodes(&node_2, &chain_monitor_2, &node_4, &chain_monitor_4);
-               for router in vec!(&router_1, &router_2, &router_3, &router_4) {
-                       assert!(router.handle_channel_announcement(&chan_announcement_4.0).unwrap());
-                       router.handle_channel_update(&chan_announcement_4.1).unwrap();
-                       router.handle_channel_update(&chan_announcement_4.2).unwrap();
-               }
+               let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
 
-               send_payment(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 1000000);
-               send_payment(&node_3, &router_3, &vec!(&*node_4)[..], 1000000);
-               send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
-               send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
-               send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
-               send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
-               send_payment(&node_2, &router_2, &vec!(&*node_4)[..], 8000000);
+               send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
+               send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
+               send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+               send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+               send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+               send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+               send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
 
                // Do some rebalance loop payments, simultaneously
                let mut hops = Vec::with_capacity(3);
                hops.push(RouteHop {
-                       pubkey: node_3.get_our_node_id(),
-                       short_channel_id: chan_announcement_2.1.contents.short_channel_id,
+                       pubkey: nodes[2].node.get_our_node_id(),
+                       short_channel_id: chan_2.0.contents.short_channel_id,
                        fee_msat: 0,
-                       cltv_expiry_delta: chan_announcement_3.1.contents.cltv_expiry_delta as u32
+                       cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
                });
                hops.push(RouteHop {
-                       pubkey: node_4.get_our_node_id(),
-                       short_channel_id: chan_announcement_3.1.contents.short_channel_id,
+                       pubkey: nodes[3].node.get_our_node_id(),
+                       short_channel_id: chan_3.0.contents.short_channel_id,
                        fee_msat: 0,
-                       cltv_expiry_delta: chan_announcement_4.2.contents.cltv_expiry_delta as u32
+                       cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
                });
                hops.push(RouteHop {
-                       pubkey: node_2.get_our_node_id(),
-                       short_channel_id: chan_announcement_4.1.contents.short_channel_id,
+                       pubkey: nodes[1].node.get_our_node_id(),
+                       short_channel_id: chan_4.0.contents.short_channel_id,
                        fee_msat: 1000000,
                        cltv_expiry_delta: 142,
                });
-               hops[1].fee_msat = chan_announcement_4.2.contents.fee_base_msat as u64 + chan_announcement_4.2.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
-               hops[0].fee_msat = chan_announcement_3.1.contents.fee_base_msat as u64 + chan_announcement_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
-               let payment_preimage_1 = send_along_route(&node_2, Route { hops }, &vec!(&*node_3, &*node_4, &*node_2)[..], 1000000).0;
+               hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
+               hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
+               let payment_preimage_1 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
 
                let mut hops = Vec::with_capacity(3);
                hops.push(RouteHop {
-                       pubkey: node_4.get_our_node_id(),
-                       short_channel_id: chan_announcement_4.1.contents.short_channel_id,
+                       pubkey: nodes[3].node.get_our_node_id(),
+                       short_channel_id: chan_4.0.contents.short_channel_id,
                        fee_msat: 0,
-                       cltv_expiry_delta: chan_announcement_3.2.contents.cltv_expiry_delta as u32
+                       cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
                });
                hops.push(RouteHop {
-                       pubkey: node_3.get_our_node_id(),
-                       short_channel_id: chan_announcement_3.1.contents.short_channel_id,
+                       pubkey: nodes[2].node.get_our_node_id(),
+                       short_channel_id: chan_3.0.contents.short_channel_id,
                        fee_msat: 0,
-                       cltv_expiry_delta: chan_announcement_2.2.contents.cltv_expiry_delta as u32
+                       cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
                });
                hops.push(RouteHop {
-                       pubkey: node_2.get_our_node_id(),
-                       short_channel_id: chan_announcement_2.1.contents.short_channel_id,
+                       pubkey: nodes[1].node.get_our_node_id(),
+                       short_channel_id: chan_2.0.contents.short_channel_id,
                        fee_msat: 1000000,
                        cltv_expiry_delta: 142,
                });
-               hops[1].fee_msat = chan_announcement_2.2.contents.fee_base_msat as u64 + chan_announcement_2.2.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
-               hops[0].fee_msat = chan_announcement_3.2.contents.fee_base_msat as u64 + chan_announcement_3.2.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
-               let payment_preimage_2 = send_along_route(&node_2, Route { hops }, &vec!(&*node_4, &*node_3, &*node_2)[..], 1000000).0;
+               hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
+               hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
+               let payment_preimage_2 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).0;
 
                // Claim the rebalances...
-               claim_payment(&node_2, &vec!(&*node_4, &*node_3, &*node_2)[..], payment_preimage_2);
-               claim_payment(&node_2, &vec!(&*node_3, &*node_4, &*node_2)[..], payment_preimage_1);
+               claim_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_preimage_2);
+               claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
 
                // Add a duplicate new channel from 2 to 4
-               let chan_announcement_5 = create_chan_between_nodes(&node_2, &chain_monitor_2, &node_4, &chain_monitor_4);
-               for router in vec!(&router_1, &router_2, &router_3, &router_4) {
-                       assert!(router.handle_channel_announcement(&chan_announcement_5.0).unwrap());
-                       router.handle_channel_update(&chan_announcement_5.1).unwrap();
-                       router.handle_channel_update(&chan_announcement_5.2).unwrap();
-               }
+               let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3);
 
                // Send some payments across both channels
-               let payment_preimage_3 = route_payment(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 3000000).0;
-               let payment_preimage_4 = route_payment(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 3000000).0;
-               let payment_preimage_5 = route_payment(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 3000000).0;
+               let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+               let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+               let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
 
-               route_over_limit(&node_1, &router_1, &vec!(&*node_2, &*node_4)[..], 3000000);
+               route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
 
                //TODO: Test that routes work again here as we've been notified that the channel is full
 
-               claim_payment(&node_1, &vec!(&*node_2, &*node_4)[..], payment_preimage_3);
-               claim_payment(&node_1, &vec!(&*node_2, &*node_4)[..], payment_preimage_4);
-               claim_payment(&node_1, &vec!(&*node_2, &*node_4)[..], payment_preimage_5);
+               claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
+               claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
+               claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
 
                // Close down the channels...
-               close_channel(&node_1, &tx_broadcaster_1, &node_2, &tx_broadcaster_2, &chan_announcement_1.3, true);
-               close_channel(&node_2, &tx_broadcaster_2, &node_3, &tx_broadcaster_3, &chan_announcement_2.3, false);
-               close_channel(&node_3, &tx_broadcaster_3, &node_4, &tx_broadcaster_4, &chan_announcement_3.3, true);
-               close_channel(&node_2, &tx_broadcaster_2, &node_4, &tx_broadcaster_4, &chan_announcement_4.3, false);
+               close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
+               close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
+               close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
+               close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
+               close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
 
                // Check that we processed all pending events
-               for node in vec!(&node_1, &node_2, &node_3, &node_4) {
-                       assert_eq!(node.get_and_clear_pending_events().len(), 0);
+               for node in nodes {
+                       assert_eq!(node.node.get_and_clear_pending_events().len(), 0);
+                       assert_eq!(node.chan_monitor.added_monitors.lock().unwrap().len(), 0);
                }
        }
 }
index 53440f4361341c7af9d5d441ebca3559030698e4..3021163f9e7271493a4c9d30ea7e98413640091c 100644 (file)
@@ -270,6 +270,12 @@ impl ChannelMonitor {
                min
        }
 
+       pub fn provide_payment_preimage(&mut self, payment_preimage: &[u8; 32]) {
+               //TODO: Some kind of timeout here or ability to mark all states containing this preimage
+               //revoked?
+               self.payment_preimages.push(payment_preimage.clone());
+       }
+
        #[inline]
        fn check_spend_transaction(&self, tx: &Transaction, height: u32) -> Vec<Transaction> {
                // Most secp and related errors trying to create keys means we have no hope of constructing
@@ -329,7 +335,7 @@ impl ChannelMonitor {
                                        total_value += tx.output[per_commitment_data.revoked_output_index as usize].value;
 
                                        for &(ref htlc, ref _next_tx_sig) in per_commitment_data.htlcs.iter() {
-                                               let expected_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &a_htlc_key, &b_htlc_key, &revocation_pubkey, htlc.offered);
+                                               let expected_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &a_htlc_key, &b_htlc_key, &revocation_pubkey);
                                                if htlc.transaction_output_index as usize >= tx.output.len() ||
                                                                tx.output[htlc.transaction_output_index as usize].value != htlc.amount_msat / 1000 ||
                                                                tx.output[htlc.transaction_output_index as usize].script_pubkey != expected_script.to_v0_p2wsh() {
@@ -420,7 +426,7 @@ impl ChannelMonitor {
 
                                                let sig = match self.revocation_base_key {
                                                        RevocationStorage::PrivMode { ref revocation_base_key } => {
-                                                               let htlc_redeemscript = chan_utils::get_htlc_redeemscript_with_explicit_keys(htlc, &a_htlc_key, &b_htlc_key, &revocation_pubkey, htlc.offered);
+                                                               let htlc_redeemscript = chan_utils::get_htlc_redeemscript_with_explicit_keys(htlc, &a_htlc_key, &b_htlc_key, &revocation_pubkey);
                                                                let sighash = ignore_error!(Message::from_slice(&sighash_parts.sighash_all(&input, &htlc_redeemscript, values_drain.next().unwrap())[..]));
 
                                                                let revocation_key = ignore_error!(chan_utils::derive_private_revocation_key(&self.secp_ctx, &per_commitment_key, &revocation_base_key));
index e85f25ac78a23248690d520a586af5cb1cca9fb5..c48e0568169c121e6f434df99dc5ec495eecd329 100644 (file)
@@ -357,6 +357,15 @@ pub struct HandleError { //TODO: rename me
        pub msg: Option<ErrorAction>, //TODO: Make this required and rename it
 }
 
+/// Struct used to return values from revoke_and_ack messages, containing a bunch of commitment
+/// transaction updates if they were pending.
+pub struct CommitmentUpdate {
+       pub update_add_htlcs: Vec<UpdateAddHTLC>,
+       pub update_fulfill_htlcs: Vec<UpdateFulfillHTLC>,
+       pub update_fail_htlcs: Vec<UpdateFailHTLC>,
+       pub commitment_signed: CommitmentSigned,
+}
+
 /// A trait to describe an object which can receive channel messages. Messages MAY be called in
 /// paralell when they originate from different their_node_ids, however they MUST NOT be called in
 /// paralell when the two calls have the same their_node_id.
@@ -377,8 +386,8 @@ pub trait ChannelMessageHandler : events::EventsProvider {
        fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFulfillHTLC) -> Result<(), HandleError>;
        fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailHTLC) -> Result<(), HandleError>;
        fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailMalformedHTLC) -> Result<(), HandleError>;
-       fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &CommitmentSigned) -> Result<RevokeAndACK, HandleError>;
-       fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &RevokeAndACK) -> Result<Option<(Vec<UpdateAddHTLC>, CommitmentSigned)>, HandleError>;
+       fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &CommitmentSigned) -> Result<(RevokeAndACK, Option<CommitmentSigned>), HandleError>;
+       fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &RevokeAndACK) -> Result<Option<CommitmentUpdate>, HandleError>;
 
        fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &UpdateFee) -> Result<(), HandleError>;
 
@@ -737,7 +746,11 @@ impl MsgDecodable for Shutdown {
 }
 impl MsgEncodable for Shutdown {
        fn encode(&self) -> Vec<u8> {
-               unimplemented!();
+               let mut res = Vec::with_capacity(32 + 2 + self.scriptpubkey.len());
+               res.extend_from_slice(&serialize(&self.channel_id).unwrap());
+               res.extend_from_slice(&byte_utils::be16_to_array(self.scriptpubkey.len() as u16));
+               res.extend_from_slice(&self.scriptpubkey[..]);
+               res
        }
 }
 
@@ -756,7 +769,12 @@ impl MsgDecodable for ClosingSigned {
 }
 impl MsgEncodable for ClosingSigned {
        fn encode(&self) -> Vec<u8> {
-               unimplemented!();
+               let mut res = Vec::with_capacity(32+8+64);
+               res.extend_from_slice(&serialize(&self.channel_id).unwrap());
+               res.extend_from_slice(&byte_utils::be64_to_array(self.fee_satoshis));
+               let secp_ctx = Secp256k1::without_caps();
+               res.extend_from_slice(&self.signature.serialize_compact(&secp_ctx));
+               res
        }
 }
 
index 01081845ae8630f9c6540fe30f706f6dbe4238c0..d7b1e9d691238f34161c53d6c0e1eebf5ba35606 100644 (file)
@@ -433,18 +433,27 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
 
                                                                                        132 => {
                                                                                                let msg = try_potential_decodeerror!(msgs::CommitmentSigned::decode(&msg_data[2..]));
-                                                                                               let resp = try_potential_handleerror!(self.message_handler.chan_handler.handle_commitment_signed(&peer.their_node_id.unwrap(), &msg));
-                                                                                               encode_and_send_msg!(resp, 133);
+                                                                                               let resps = try_potential_handleerror!(self.message_handler.chan_handler.handle_commitment_signed(&peer.their_node_id.unwrap(), &msg));
+                                                                                               encode_and_send_msg!(resps.0, 133);
+                                                                                               if let Some(resp) = resps.1 {
+                                                                                                       encode_and_send_msg!(resp, 132);
+                                                                                               }
                                                                                        },
                                                                                        133 => {
                                                                                                let msg = try_potential_decodeerror!(msgs::RevokeAndACK::decode(&msg_data[2..]));
                                                                                                let resp_option = try_potential_handleerror!(self.message_handler.chan_handler.handle_revoke_and_ack(&peer.their_node_id.unwrap(), &msg));
                                                                                                match resp_option {
                                                                                                        Some(resps) => {
-                                                                                                               for resp in resps.0 {
+                                                                                                               for resp in resps.update_add_htlcs {
                                                                                                                        encode_and_send_msg!(resp, 128);
                                                                                                                }
-                                                                                                               encode_and_send_msg!(resps.1, 132);
+                                                                                                               for resp in resps.update_fulfill_htlcs {
+                                                                                                                       encode_and_send_msg!(resp, 130);
+                                                                                                               }
+                                                                                                               for resp in resps.update_fail_htlcs {
+                                                                                                                       encode_and_send_msg!(resp, 131);
+                                                                                                               }
+                                                                                                               encode_and_send_msg!(resps.commitment_signed, 132);
                                                                                                        },
                                                                                                        None => {},
                                                                                                }
@@ -581,19 +590,21 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
                                                Self::do_attempt_write_data(&mut descriptor, peer);
                                                continue;
                                        },
-                                       Event::SendFulfillHTLC { ref node_id, ref msg } => {
+                                       Event::SendFulfillHTLC { ref node_id, ref msg, ref commitment_msg } => {
                                                let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
                                                                //TODO: Do whatever we're gonna do for handling dropped messages
                                                        });
                                                peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 130)));
+                                               peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(commitment_msg, 132)));
                                                Self::do_attempt_write_data(&mut descriptor, peer);
                                                continue;
                                        },
-                                       Event::SendFailHTLC { ref node_id, ref msg } => {
+                                       Event::SendFailHTLC { ref node_id, ref msg, ref commitment_msg } => {
                                                let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
                                                                //TODO: Do whatever we're gonna do for handling dropped messages
                                                        });
                                                peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 131)));
+                                               peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(commitment_msg, 132)));
                                                Self::do_attempt_write_data(&mut descriptor, peer);
                                                continue;
                                        },
index 9cf25ec38e15b23439d82a8f18c6c446352cb846..db4769067e35ba95b5c0a1d5038c9de3b7d60378 100644 (file)
@@ -76,11 +76,13 @@ pub enum Event {
        SendFulfillHTLC {
                node_id: PublicKey,
                msg: msgs::UpdateFulfillHTLC,
+               commitment_msg: msgs::CommitmentSigned,
        },
        /// Used to indicate that we need to fail an htlc from the peer with the given node_id.
        SendFailHTLC {
                node_id: PublicKey,
                msg: msgs::UpdateFailHTLC,
+               commitment_msg: msgs::CommitmentSigned,
        },
        /// Used to indicate that a channel_announcement and channel_update should be broadcast to all
        /// peers (except the peer with node_id either msg.contents.node_id_1 or msg.contents.node_id_2).
diff --git a/src/util/rust_crypto_nonstd_arch.c b/src/util/rust_crypto_nonstd_arch.c
new file mode 100644 (file)
index 0000000..f507658
--- /dev/null
@@ -0,0 +1,13 @@
+#include <stdint.h>
+#include <stdlib.h>
+
+uint32_t rust_crypto_util_fixed_time_eq_asm(uint8_t* lhsp, uint8_t* rhsp, size_t count) {
+       if (count == 0) {
+               return 1;
+       }
+       uint8_t result = 0;
+       for (size_t i = 0; i < count; i++) {
+               result |= (lhsp[i] ^ rhsp[i]);
+       }
+       return result;
+}
index 5d657b13df585922626d37c9950d303838c3d8ae..7e28f54374051d13035823b73e38e1eebf9ab6ba 100644 (file)
@@ -6,7 +6,7 @@ use ln::msgs::HandleError;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::util::hash::Sha256dHash;
 
-use std::sync::Mutex;
+use std::sync::{Arc,Mutex};
 
 pub struct TestFeeEstimator {
        pub sat_per_vbyte: u64,
@@ -18,12 +18,21 @@ impl chaininterface::FeeEstimator for TestFeeEstimator {
 }
 
 pub struct TestChannelMonitor {
-
+       pub added_monitors: Mutex<Vec<((Sha256dHash, u16), channelmonitor::ChannelMonitor)>>,
+       pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<(Sha256dHash, u16)>>,
+}
+impl TestChannelMonitor {
+       pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>) -> Self {
+               Self {
+                       added_monitors: Mutex::new(Vec::new()),
+                       simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster),
+               }
+       }
 }
 impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
-       fn add_update_monitor(&self, _funding_txo: (Sha256dHash, u16), _monitor: channelmonitor::ChannelMonitor) -> Result<(), HandleError> {
-               //TODO!
-               Ok(())
+       fn add_update_monitor(&self, funding_txo: (Sha256dHash, u16), monitor: channelmonitor::ChannelMonitor) -> Result<(), HandleError> {
+               self.added_monitors.lock().unwrap().push((funding_txo, monitor.clone()));
+               self.simple_monitor.add_update_monitor(funding_txo, monitor)
        }
 }