Return channel_updates when failing an HTLC for fee/CLTV reasons
[rust-lightning] / src / ln / channelmanager.rs
index 67837af719c31829452ebff10849ae6c0f4da80d..27f470d40f8a0e6182b677ac925dd20687cd21ca 100644 (file)
@@ -19,13 +19,15 @@ use ln::msgs;
 use ln::msgs::{HandleError,ChannelMessageHandler,MsgEncodable,MsgDecodable};
 use util::{byte_utils, events, internal_traits, rng};
 use util::sha2::Sha256;
+use util::chacha20poly1305rfc::ChaCha20;
+use util::logger::Logger;
+use util::errors::APIError;
 
 use crypto;
 use crypto::mac::{Mac,MacResult};
 use crypto::hmac::Hmac;
 use crypto::digest::Digest;
 use crypto::symmetriccipher::SynchronousStreamCipher;
-use crypto::chacha20::ChaCha20;
 
 use std::{ptr, mem};
 use std::collections::HashMap;
@@ -115,14 +117,19 @@ struct ChannelHolder {
        short_to_id: HashMap<u64, [u8; 32]>,
        next_forward: Instant,
        /// short channel id -> forward infos. Key of 0 means payments received
+       /// Note that while this is held in the same mutex as the channels themselves, no consistency
+       /// guarantees are made about there existing a channel with the short id here, nor about the
+       /// short ids in the PendingForwardHTLCInfo being valid!
        forward_htlcs: HashMap<u64, Vec<PendingForwardHTLCInfo>>,
+       /// Note that while this is held in the same mutex as the channels themselves, no consistency
+       /// guarantees are made about the channels given here actually existing anymore by the time you
+       /// go to read them!
        claimable_htlcs: HashMap<[u8; 32], PendingOutboundHTLC>,
 }
 struct MutChannelHolder<'a> {
        by_id: &'a mut HashMap<[u8; 32], Channel>,
        short_to_id: &'a mut HashMap<u64, [u8; 32]>,
        next_forward: &'a mut Instant,
-       /// short channel id -> forward infos. Key of 0 means payments received
        forward_htlcs: &'a mut HashMap<u64, Vec<PendingForwardHTLCInfo>>,
        claimable_htlcs: &'a mut HashMap<[u8; 32], PendingOutboundHTLC>,
 }
@@ -132,13 +139,15 @@ impl ChannelHolder {
                        by_id: &mut self.by_id,
                        short_to_id: &mut self.short_to_id,
                        next_forward: &mut self.next_forward,
-                       /// short channel id -> forward infos. Key of 0 means payments received
                        forward_htlcs: &mut self.forward_htlcs,
                        claimable_htlcs: &mut self.claimable_htlcs,
                }
        }
 }
 
+#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
+const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
+
 /// Manager which keeps track of a number of channels and sends messages to the appropriate
 /// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
 /// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
@@ -152,13 +161,15 @@ pub struct ChannelManager {
 
        announce_channels_publicly: bool,
        fee_proportional_millionths: u32,
-       latest_block_height: AtomicUsize, //TODO: Compile-time assert this is at least 32-bits long
+       latest_block_height: AtomicUsize,
        secp_ctx: Secp256k1,
 
        channel_state: Mutex<ChannelHolder>,
        our_network_key: SecretKey,
 
        pending_events: Mutex<Vec<events::Event>>,
+
+       logger: Arc<Logger>,
 }
 
 const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
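The `const ERR: ()` item added above replaces the removed runtime TODO ("Compile-time assert this is at least 32-bits long"): on targets whose pointer width is neither 32 nor 64 bits, the cfg matches and the type mismatch (a &str assigned to a ()) aborts the build. A minimal standalone sketch of the same pattern, with an illustrative name rather than the library's:

    // Only compiled on unsupported targets; assigning a &str to a () is a type
    // error there, so the build fails with this message as a hint.
    #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
    const POINTER_WIDTH_CHECK: () =
        "latest_block_height is an AtomicUsize and needs usize to be at least 32 bits";

    fn main() {
        // On 32- and 64-bit targets the guard above is cfg'd out entirely.
        println!("usize is wide enough");
    }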
@@ -204,7 +215,7 @@ impl ChannelManager {
        /// fee_proportional_millionths is an optional fee to charge any payments routed through us.
        /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
        /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
-       pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
+       pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
                let secp_ctx = Secp256k1::new();
 
                let res = Arc::new(ChannelManager {
@@ -229,6 +240,8 @@ impl ChannelManager {
                        our_network_key,
 
                        pending_events: Mutex::new(Vec::new()),
+
+                       logger,
                });
                let weak_res = Arc::downgrade(&res);
                res.chain_monitor.register_listener(weak_res);
@@ -242,7 +255,8 @@ impl ChannelManager {
        /// may wish to avoid using 0 for user_id here.
        /// If successful, will generate a SendOpenChannel event, so you should probably poll
        /// PeerManager::process_events afterwards.
-       pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, user_id: u64) -> Result<(), HandleError> {
+       /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is greater than channel_value_satoshis * 1k
+       pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
                let chan_keys = if cfg!(feature = "fuzztarget") {
                        ChannelKeys {
                                funding_key:               SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
@@ -263,7 +277,7 @@ impl ChannelManager {
                        }
                };
 
-               let channel = Channel::new_outbound(&*self.fee_estimator, chan_keys, their_network_key, channel_value_satoshis, self.announce_channels_publicly, user_id);
+               let channel = Channel::new_outbound(&*self.fee_estimator, chan_keys, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?;
                let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator)?;
                let mut channel_state = self.channel_state.lock().unwrap();
                match channel_state.by_id.insert(channel.channel_id(), channel) {
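create_channel now takes a push_msat argument and returns APIError instead of HandleError, with the argument bounds stated in the doc comment above (channel value of at most 2^24 sats, push_msat no larger than 1000x the channel value in millisatoshis). A standalone sketch of just that arithmetic, assuming the checks match the doc comment; the real validation happens inside Channel::new_outbound:

    // Hypothetical mirror of the documented create_channel argument checks.
    #[derive(Debug)]
    enum ApiMisuse { ChannelValueTooLarge, PushExceedsChannelValue }

    fn check_create_channel_args(channel_value_satoshis: u64, push_msat: u64) -> Result<(), ApiMisuse> {
        if channel_value_satoshis > (1 << 24) {
            // Pre-wumbo limit on funding outputs, per the doc comment's 2**24 bound.
            return Err(ApiMisuse::ChannelValueTooLarge);
        }
        if push_msat > channel_value_satoshis * 1000 {
            // push_msat is in millisatoshis, so at most 1000x the channel value in sats.
            return Err(ApiMisuse::PushExceedsChannelValue);
        }
        Ok(())
    }

    fn main() {
        // Mirrors the updated test below: a 100_000-sat channel pushing 10_001 msat.
        assert!(check_create_channel_args(100_000, 10_001).is_ok());
        assert!(check_create_channel_args(100_000, 100_000_001).is_err());
        assert!(check_create_channel_args((1 << 24) + 1, 0).is_err());
    }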
@@ -360,6 +374,55 @@ impl ChannelManager {
                Ok(())
        }
 
+       #[inline]
+       fn finish_force_close_channel(&self, shutdown_res: (Vec<Transaction>, Vec<[u8; 32]>)) {
+               let (local_txn, failed_htlcs) = shutdown_res;
+               for payment_hash in failed_htlcs {
+                       // unknown_next_peer - the channel is gone, so we no longer know who the next peer was
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+               }
+               for tx in local_txn {
+                       self.tx_broadcaster.broadcast_transaction(&tx);
+               }
+               //TODO: We need to have a way where outbound HTLC claims can result in us claiming the
+               //now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards).
+               //TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
+               //may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
+               //timeouts are hit and our claims confirm).
+       }
+
+       /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
+       /// the chain and rejecting new HTLCs on the given channel.
+       pub fn force_close_channel(&self, channel_id: &[u8; 32]) {
+               let mut chan = {
+                       let mut channel_state_lock = self.channel_state.lock().unwrap();
+                       let channel_state = channel_state_lock.borrow_parts();
+                       if let Some(chan) = channel_state.by_id.remove(channel_id) {
+                               if let Some(short_id) = chan.get_short_channel_id() {
+                                       channel_state.short_to_id.remove(&short_id);
+                               }
+                               chan
+                       } else {
+                               return;
+                       }
+               };
+               self.finish_force_close_channel(chan.force_shutdown());
+               let mut events = self.pending_events.lock().unwrap();
+               if let Ok(update) = self.get_channel_update(&chan) {
+                       events.push(events::Event::BroadcastChannelUpdate {
+                               msg: update
+                       });
+               }
+       }
+
+       /// Force close all channels, immediately broadcasting the latest local commitment transaction
+       /// for each to the chain and rejecting new HTLCs on each.
+       pub fn force_close_all_channels(&self) {
+               for chan in self.list_channels() {
+                       self.force_close_channel(&chan.channel_id);
+               }
+       }
+
        #[inline]
        fn gen_rho_mu_from_shared_secret(shared_secret: &SharedSecret) -> ([u8; 32], [u8; 32]) {
                ({
@@ -717,7 +780,10 @@ impl ChannelManager {
 
        /// Call this upon creation of a funding transaction for the given channel.
        /// Panics if a funding transaction has already been provided for this channel.
+       /// May panic if the funding_txo duplicates that of some other channel (note that this should
+       /// be trivially prevented by using unique funding transaction keys per-channel).
        pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
+
                macro_rules! add_pending_event {
                        ($event: expr) => {
                                {
@@ -736,12 +802,12 @@ impl ChannelManager {
                                                        (chan, funding_msg.0, funding_msg.1)
                                                },
                                                Err(e) => {
+                                                       log_error!(self, "Got bad signatures: {}!", e.err);
                                                        mem::drop(channel_state);
-                                                       add_pending_event!(events::Event::DisconnectPeer {
+                                                       add_pending_event!(events::Event::HandleError {
                                                                node_id: chan.get_their_node_id(),
-                                                               msg: if let Some(msgs::ErrorAction::DisconnectPeer { msg } ) = e.action { msg } else { None },
+                                                               action: e.action,
                                                        });
-
                                                        return;
                                                },
                                        }
@@ -758,7 +824,14 @@ impl ChannelManager {
                });
 
                let mut channel_state = self.channel_state.lock().unwrap();
-               channel_state.by_id.insert(chan.channel_id(), chan);
+               match channel_state.by_id.entry(chan.channel_id()) {
+                       hash_map::Entry::Occupied(_) => {
+                               panic!("Generated duplicate funding txid?");
+                       },
+                       hash_map::Entry::Vacant(e) => {
+                               e.insert(chan);
+                       }
+               }
        }
 
        fn get_announcement_sigs(&self, chan: &Channel) -> Result<Option<msgs::AnnouncementSignatures>, HandleError> {
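This hunk, like the handle_funding_created change further down, replaces a blind HashMap::insert (which would silently overwrite an existing channel) with the Entry API, so a duplicate channel_id either panics (locally generated funding) or is reported as an error (remote funding_created). A generic sketch of the insert-or-reject pattern, independent of the library's types:

    use std::collections::HashMap;
    use std::collections::hash_map::Entry;

    // Insert only if the key is new; report a duplicate instead of overwriting.
    fn insert_unique<K: std::hash::Hash + Eq, V>(map: &mut HashMap<K, V>, key: K, value: V)
        -> Result<(), &'static str>
    {
        match map.entry(key) {
            Entry::Occupied(_) => Err("Already had channel with the new channel_id"),
            Entry::Vacant(e) => { e.insert(value); Ok(()) }
        }
    }

    fn main() {
        let mut by_id: HashMap<[u8; 32], &str> = HashMap::new();
        assert!(insert_unique(&mut by_id, [0u8; 32], "channel A").is_ok());
        // A second channel claiming the same id is rejected rather than replacing the first.
        assert!(insert_unique(&mut by_id, [0u8; 32], "channel B").is_err());
    }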
@@ -832,7 +905,7 @@ impl ChannelManager {
                                        if !add_htlc_msgs.is_empty() {
                                                let (commitment_msg, monitor) = match forward_chan.send_commitment() {
                                                        Ok(res) => res,
-                                                       Err(_) => {
+                                                       Err(_e) => {
                                                                //TODO: Handle...this is bad!
                                                                continue;
                                                        },
@@ -884,6 +957,12 @@ impl ChannelManager {
                self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: Vec::new() })
        }
 
+       /// Fails an HTLC back to the node that sent it to us.
+       /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
+       /// Several callsites loop over a list of payment_hashes to fail and re-take the channel_state
+       /// lock on each iteration (we take ownership of the guard and may drop it). In other words,
+       /// no assumptions are made that entries in claimable_htlcs point to still-available channels.
        fn fail_htlc_backwards_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, payment_hash: &[u8; 32], onion_error: HTLCFailReason) -> bool {
                let mut pending_htlc = {
                        match channel_state.claimable_htlcs.remove(payment_hash) {
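The doc comment above spells out a lock-ownership convention: fail_htlc_backwards_internal receives the channel_state MutexGuard by value, so it controls when the lock is released, and callers that fail several payment hashes simply re-lock on each iteration. A minimal sketch of that convention, with a plain HashMap standing in for ChannelHolder:

    use std::collections::HashMap;
    use std::sync::{Mutex, MutexGuard};

    // The helper owns the guard, so it can drop the lock before doing follow-up
    // work (e.g. pushing events) that must not run with the channel map held.
    fn fail_one(mut state: MutexGuard<HashMap<[u8; 32], u64>>, payment_hash: &[u8; 32]) {
        let _claimable = state.remove(payment_hash);
        drop(state); // lock released here, before any event generation
    }

    fn main() {
        let channel_state = Mutex::new(HashMap::new());
        channel_state.lock().unwrap().insert([1u8; 32], 42u64);
        for hash in &[[1u8; 32], [2u8; 32]] {
            // Each iteration takes the lock fresh, matching the callsites described above.
            fail_one(channel_state.lock().unwrap(), hash);
        }
    }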
@@ -904,7 +983,7 @@ impl ChannelManager {
                }
 
                match pending_htlc {
-                       PendingOutboundHTLC::CycledRoute { .. } => { panic!("WAT"); },
+                       PendingOutboundHTLC::CycledRoute { .. } => unreachable!(),
                        PendingOutboundHTLC::OutboundRoute { .. } => {
                                mem::drop(channel_state);
 
@@ -950,6 +1029,7 @@ impl ChannelManager {
                                                }
 
                                                let mut pending_events = self.pending_events.lock().unwrap();
+                                               //TODO: replace with HandleError? UpdateFailHTLC in handle_update_add_htlc also needs to build a CommitmentSigned
                                                pending_events.push(events::Event::SendFailHTLC {
                                                        node_id,
                                                        msg: msg,
@@ -1000,7 +1080,7 @@ impl ChannelManager {
                }
 
                match pending_htlc {
-                       PendingOutboundHTLC::CycledRoute { .. } => { panic!("WAT"); },
+                       PendingOutboundHTLC::CycledRoute { .. } => unreachable!(),
                        PendingOutboundHTLC::OutboundRoute { .. } => {
                                if from_user {
                                        panic!("Called claim_funds with a preimage for an outgoing payment. There is nothing we can do with this, and something is seriously wrong if you knew this...");
@@ -1016,13 +1096,20 @@ impl ChannelManager {
                                let (node_id, fulfill_msgs) = {
                                        let chan_id = match channel_state.short_to_id.get(&source_short_channel_id) {
                                                Some(chan_id) => chan_id.clone(),
-                                               None => return false
+                                               None => {
+                                                       // TODO: There is probably a channel manager somewhere that needs to
+                                                       // learn the preimage as the channel already hit the chain and that's
+                                                       // why it's missing.
+                                                       return false
+                                               }
                                        };
 
                                        let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
                                        match chan.get_update_fulfill_htlc_and_commit(payment_preimage) {
                                                Ok(msg) => (chan.get_their_node_id(), msg),
                                                Err(_e) => {
+                                                       // TODO: There is probably a channel manager somewhere that needs to
+                                                       // learn the preimage as the channel may be about to hit the chain.
                                                        //TODO: Do something with e?
                                                        return false;
                                                },
@@ -1074,15 +1161,18 @@ impl events::EventsProvider for ChannelManager {
 impl ChainListener for ChannelManager {
        fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
                let mut new_events = Vec::new();
+               let mut failed_channels = Vec::new();
                {
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       let mut short_to_ids_to_insert = Vec::new();
-                       let mut short_to_ids_to_remove = Vec::new();
+                       let mut channel_lock = self.channel_state.lock().unwrap();
+                       let channel_state = channel_lock.borrow_parts();
+                       let short_to_id = channel_state.short_to_id;
                        channel_state.by_id.retain(|_, channel| {
-                               if let Some(funding_locked) = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched) {
+                               let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
+                               if let Ok(Some(funding_locked)) = chan_res {
                                        let announcement_sigs = match self.get_announcement_sigs(channel) {
                                                Ok(res) => res,
-                                               Err(_e) => {
+                                               Err(e) => {
+                                                       log_error!(self, "Got error handling message: {}!", e.err);
                                                        //TODO: push e on events and blow up the channel (it has bad keys)
                                                        return true;
                                                }
@@ -1092,16 +1182,27 @@ impl ChainListener for ChannelManager {
                                                msg: funding_locked,
                                                announcement_sigs: announcement_sigs
                                        });
-                                       short_to_ids_to_insert.push((channel.get_short_channel_id().unwrap(), channel.channel_id()));
+                                       short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+                               } else if let Err(e) = chan_res {
+                                       new_events.push(events::Event::HandleError {
+                                               node_id: channel.get_their_node_id(),
+                                               action: e.action,
+                                       });
+                                       if channel.is_shutdown() {
+                                               return false;
+                                       }
                                }
                                if let Some(funding_txo) = channel.get_funding_txo() {
                                        for tx in txn_matched {
                                                for inp in tx.input.iter() {
                                                        if inp.prev_hash == funding_txo.txid && inp.prev_index == funding_txo.index as u32 {
                                                                if let Some(short_id) = channel.get_short_channel_id() {
-                                                                       short_to_ids_to_remove.push(short_id);
+                                                                       short_to_id.remove(&short_id);
                                                                }
-                                                               channel.force_shutdown();
+                                                               // It looks like our counterparty went on-chain. We go ahead and
+                                                               // broadcast our latest local state as well here, just in case it's
+                                                               // some kind of SPV attack, though we expect these to be dropped.
+                                                               failed_channels.push(channel.force_shutdown());
                                                                if let Ok(update) = self.get_channel_update(&channel) {
                                                                        new_events.push(events::Event::BroadcastChannelUpdate {
                                                                                msg: update
@@ -1112,11 +1213,15 @@ impl ChainListener for ChannelManager {
                                                }
                                        }
                                }
-                               if channel.channel_monitor().would_broadcast_at_height(height) {
+                               if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height) {
                                        if let Some(short_id) = channel.get_short_channel_id() {
-                                               short_to_ids_to_remove.push(short_id);
+                                               short_to_id.remove(&short_id);
                                        }
-                                       channel.force_shutdown();
+                                       failed_channels.push(channel.force_shutdown());
+                                       // If would_broadcast_at_height() is true, the channel_monitor will broadcast
+                                       // the latest local tx for us, so we should skip that here (it doesn't really
+                                       // hurt anything, but does make tests a bit simpler).
+                                       failed_channels.last_mut().unwrap().0 = Vec::new();
                                        if let Ok(update) = self.get_channel_update(&channel) {
                                                new_events.push(events::Event::BroadcastChannelUpdate {
                                                        msg: update
@@ -1126,12 +1231,9 @@ impl ChainListener for ChannelManager {
                                }
                                true
                        });
-                       for to_remove in short_to_ids_to_remove {
-                               channel_state.short_to_id.remove(&to_remove);
-                       }
-                       for to_insert in short_to_ids_to_insert {
-                               channel_state.short_to_id.insert(to_insert.0, to_insert.1);
-                       }
+               }
+               for failure in failed_channels.drain(..) {
+                       self.finish_force_close_channel(failure);
                }
                let mut pending_events = self.pending_events.lock().unwrap();
                for funding_locked in new_events.drain(..) {
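block_connected above, and block_disconnected and peer_disconnected below, now share one shape: force_shutdown results are collected into a local failed_channels vector while the channel map is locked, and finish_force_close_channel runs only after that lock is dropped, because it re-takes channel_state to fail HTLCs backwards. A standalone sketch of the collect-then-finish shape, with illustrative types:

    use std::sync::Mutex;

    struct Node { channels: Mutex<Vec<&'static str>> }

    impl Node {
        fn finish_force_close(&self, name: &str) {
            // Re-locks the channel map; calling this while the caller still held
            // the lock would deadlock, hence the two-phase structure.
            let remaining = self.channels.lock().unwrap().len();
            println!("force-closed {}, {} channels remain", name, remaining);
        }

        fn on_block(&self) {
            let mut failed_channels = Vec::new();
            {
                let mut chans = self.channels.lock().unwrap();
                chans.retain(|chan| {
                    let close = chan.starts_with("bad");
                    if close { failed_channels.push(*chan); }
                    !close
                });
            } // channel lock released here
            for chan in failed_channels.drain(..) {
                self.finish_force_close(chan);
            }
        }
    }

    fn main() {
        let node = Node { channels: Mutex::new(vec!["good-1", "bad-2"]) };
        node.on_block();
    }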
@@ -1142,23 +1244,38 @@ impl ChainListener for ChannelManager {
 
        /// We force-close the channel without letting our counterparty participate in the shutdown
        fn block_disconnected(&self, header: &BlockHeader) {
-               let mut channel_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_lock.borrow_parts();
-               let short_to_id = channel_state.short_to_id;
-               channel_state.by_id.retain(|_,  v| {
-                       if v.block_disconnected(header) {
-                               let tx = v.force_shutdown();
-                               for broadcast_tx in tx {
-                                       self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
-                               }
-                               if let Some(short_id) = v.get_short_channel_id() {
-                                       short_to_id.remove(&short_id);
+               let mut new_events = Vec::new();
+               let mut failed_channels = Vec::new();
+               {
+                       let mut channel_lock = self.channel_state.lock().unwrap();
+                       let channel_state = channel_lock.borrow_parts();
+                       let short_to_id = channel_state.short_to_id;
+                       channel_state.by_id.retain(|_,  v| {
+                               if v.block_disconnected(header) {
+                                       if let Some(short_id) = v.get_short_channel_id() {
+                                               short_to_id.remove(&short_id);
+                                       }
+                                       failed_channels.push(v.force_shutdown());
+                                       if let Ok(update) = self.get_channel_update(&v) {
+                                               new_events.push(events::Event::BroadcastChannelUpdate {
+                                                       msg: update
+                                               });
+                                       }
+                                       false
+                               } else {
+                                       true
                                }
-                               false
-                       } else {
-                               true
+                       });
+               }
+               for failure in failed_channels.drain(..) {
+                       self.finish_force_close_channel(failure);
+               }
+               if !new_events.is_empty() {
+                       let mut pending_events = self.pending_events.lock().unwrap();
+                       for funding_locked in new_events.drain(..) {
+                               pending_events.push(funding_locked);
                        }
-               });
+               }
                self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
        }
 }
@@ -1176,13 +1293,13 @@ impl ChannelMessageHandler for ChannelManager {
 
                let chan_keys = if cfg!(feature = "fuzztarget") {
                        ChannelKeys {
-                               funding_key:               SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-                               revocation_base_key:       SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-                               payment_base_key:          SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-                               delayed_payment_base_key:  SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-                               htlc_base_key:             SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-                               channel_close_key:         SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
-                               channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
+                               funding_key:               SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]).unwrap(),
+                               revocation_base_key:       SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0]).unwrap(),
+                               payment_base_key:          SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0]).unwrap(),
+                               delayed_payment_base_key:  SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0]).unwrap(),
+                               htlc_base_key:             SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0]).unwrap(),
+                               channel_close_key:         SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0]).unwrap(),
+                               channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0]).unwrap(),
                                commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        }
                } else {
@@ -1194,7 +1311,7 @@ impl ChannelMessageHandler for ChannelManager {
                        }
                };
 
-               let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly)?;
+               let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger))?;
                let accept_msg = channel.get_accept_channel()?;
                channel_state.by_id.insert(channel.channel_id(), channel);
                Ok(accept_msg)
@@ -1225,26 +1342,24 @@ impl ChannelMessageHandler for ChannelManager {
        }
 
        fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<msgs::FundingSigned, HandleError> {
-               //TODO: broke this - a node shouldn't be able to get their channel removed by sending a
-               //funding_created a second time, or long after the first, or whatever (note this also
-               //leaves the short_to_id map in a busted state.
                let (chan, funding_msg, monitor_update) = {
                        let mut channel_state = self.channel_state.lock().unwrap();
-                       match channel_state.by_id.remove(&msg.temporary_channel_id) {
-                               Some(mut chan) => {
-                                       if chan.get_their_node_id() != *their_node_id {
+                       match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_their_node_id() != *their_node_id {
                                                return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
                                        }
-                                       match chan.funding_created(msg) {
+                                       match chan.get_mut().funding_created(msg) {
                                                Ok((funding_msg, monitor_update)) => {
-                                                       (chan, funding_msg, monitor_update)
+                                                       (chan.remove(), funding_msg, monitor_update)
                                                },
                                                Err(e) => {
+                                                       //TODO: Possibly remove the channel depending on e.action
                                                        return Err(e);
                                                }
                                        }
                                },
-                               None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+                               hash_map::Entry::Vacant(_) => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
                        }
                }; // Release channel lock for install_watch_outpoint call,
                   // note that this means if the remote end is misbehaving and sends a message for the same
@@ -1254,7 +1369,17 @@ impl ChannelMessageHandler for ChannelManager {
                        unimplemented!();
                }
                let mut channel_state = self.channel_state.lock().unwrap();
-               channel_state.by_id.insert(funding_msg.channel_id, chan);
+               match channel_state.by_id.entry(funding_msg.channel_id) {
+                       hash_map::Entry::Occupied(_) => {
+                               return Err(HandleError {
+                                       err: "Duplicate channel_id!",
+                                       action: Some(msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id: funding_msg.channel_id, data: "Already had channel with the new channel_id".to_owned() } })
+                               });
+                       },
+                       hash_map::Entry::Vacant(e) => {
+                               e.insert(chan);
+                       }
+               }
                Ok(funding_msg)
        }
 
@@ -1508,8 +1633,6 @@ impl ChannelMessageHandler for ChannelManager {
                                        hmac: next_hop_data.hmac.clone(),
                                };
 
-                               //TODO: Check amt_to_forward and outgoing_cltv_value are within acceptable ranges!
-
                                PendingForwardHTLCInfo {
                                        onion_packet: Some(outgoing_packet),
                                        payment_hash: msg.payment_hash.clone(),
@@ -1531,6 +1654,17 @@ impl ChannelMessageHandler for ChannelManager {
                                Some(id) => id.clone(),
                        };
                        let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap();
+                       let fee = chan.get_our_fee_base_msat(&*self.fee_estimator) + (pending_forward_info.amt_to_forward * self.fee_proportional_millionths as u64 / 1000000) as u32;
+                       if msg.amount_msat < fee as u64 || (msg.amount_msat - fee as u64) < pending_forward_info.amt_to_forward {
+                               log_debug!(self, "HTLC {} incorrect amount: in {} out {} fee required {}", msg.htlc_id, msg.amount_msat, pending_forward_info.amt_to_forward, fee);
+                               let chan_update = self.get_channel_update(chan).unwrap();
+                               return_err!("Prior hop has deviated from the specified fee parameters or the origin node has obsolete ones", 0x1000 | 12, &chan_update.encode_with_len()[..]);
+                       }
+                       if (msg.cltv_expiry as u64) < pending_forward_info.outgoing_cltv_value as u64 + CLTV_EXPIRY_DELTA as u64 {
+                               log_debug!(self, "HTLC {} incorrect CLTV: in {} out {} delta required {}", msg.htlc_id, msg.cltv_expiry, pending_forward_info.outgoing_cltv_value, CLTV_EXPIRY_DELTA);
+                               let chan_update = self.get_channel_update(chan).unwrap();
+                               return_err!("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, &chan_update.encode_with_len()[..]);
+                       }
                        if !chan.is_live() {
                                let chan_update = self.get_channel_update(chan).unwrap();
                                return_err!("Forwarding channel is not in a ready state.", 0x1000 | 7, &chan_update.encode_with_len()[..]);
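These two checks are what the commit title refers to: the required fee is the channel's base fee plus amt_to_forward scaled by fee_proportional_millionths, and the incoming cltv_expiry must exceed the outgoing CLTV value by at least CLTV_EXPIRY_DELTA; on failure the current channel_update is attached to error codes 0x1000|12 and 0x1000|13. A standalone sketch of the arithmetic with assumed example values (1_000 msat base fee, 10 ppm proportional fee); the delta below matches the file's 6 * 24 * 2:

    const CLTV_EXPIRY_DELTA: u32 = 6 * 24 * 2;

    fn fee_ok(amount_msat_in: u64, amt_to_forward: u64, base_fee_msat: u32, fee_proportional_millionths: u32) -> bool {
        let fee = base_fee_msat as u64 + amt_to_forward * fee_proportional_millionths as u64 / 1_000_000;
        // The incoming HTLC must cover the outgoing amount plus our fee; otherwise we
        // fail back with 0x1000 | 12 and attach our channel_update.
        amount_msat_in >= fee && amount_msat_in - fee >= amt_to_forward
    }

    fn cltv_ok(cltv_expiry_in: u32, outgoing_cltv_value: u32) -> bool {
        // The incoming expiry must leave at least CLTV_EXPIRY_DELTA blocks on top of the
        // outgoing value; otherwise we fail back with 0x1000 | 13 and attach our channel_update.
        cltv_expiry_in as u64 >= outgoing_cltv_value as u64 + CLTV_EXPIRY_DELTA as u64
    }

    fn main() {
        // Forwarding 1_000_000 msat with a 1_000 msat base fee and 10 ppm: fee = 1_010 msat.
        assert!(fee_ok(1_001_010, 1_000_000, 1_000, 10));
        assert!(!fee_ok(1_000_500, 1_000_000, 1_000, 10));
        assert!(cltv_ok(1_000, 700));
        assert!(!cltv_ok(900, 700));
    }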
@@ -1543,20 +1677,14 @@ impl ChannelMessageHandler for ChannelManager {
                // destination. That's OK since those nodes are probably busted or trying to do network
                // mapping through repeated loops. In either case, we want them to stop talking to us, so
                // we send permanent_node_failure.
-               match &claimable_htlcs_entry {
-                       &hash_map::Entry::Occupied(ref e) => {
-                               let mut acceptable_cycle = false;
-                               match e.get() {
-                                       &PendingOutboundHTLC::OutboundRoute { .. } => {
-                                               acceptable_cycle = pending_forward_info.short_channel_id == 0;
-                                       },
-                                       _ => {},
-                               }
-                               if !acceptable_cycle {
-                                       return_err!("Payment looped through us twice", 0x4000 | 0x2000 | 2, &[0;0]);
-                               }
-                       },
-                       _ => {},
+               if let &hash_map::Entry::Occupied(ref e) = &claimable_htlcs_entry {
+                       let mut acceptable_cycle = false;
+                       if let &PendingOutboundHTLC::OutboundRoute { .. } = e.get() {
+                               acceptable_cycle = pending_forward_info.short_channel_id == 0;
+                       }
+                       if !acceptable_cycle {
+                               return_err!("Payment looped through us twice", 0x4000 | 0x2000 | 2, &[0;0]);
+                       }
                }
 
                let (source_short_channel_id, res) = match channel_state.by_id.get_mut(&msg.channel_id) {
@@ -1571,7 +1699,7 @@ impl ChannelMessageHandler for ChannelManager {
                                pending_forward_info.prev_short_channel_id = short_channel_id;
                                (short_channel_id, chan.update_add_htlc(&msg, pending_forward_info)?)
                        },
-                       None => return Err(HandleError{err: "Failed to find corresponding channel", action: None}), //TODO: panic?
+                       None => return Err(HandleError{err: "Failed to find corresponding channel", action: None}),
                };
 
                match claimable_htlcs_entry {
@@ -1581,7 +1709,7 @@ impl ChannelMessageHandler for ChannelManager {
                                        &mut PendingOutboundHTLC::OutboundRoute { ref route, ref session_priv } => {
                                                (route.clone(), session_priv.clone())
                                        },
-                                       _ => { panic!("WAT") },
+                                       _ => unreachable!(),
                                };
                                *outbound_route = PendingOutboundHTLC::CycledRoute {
                                        source_short_channel_id,
@@ -1607,22 +1735,16 @@ impl ChannelMessageHandler for ChannelManager {
                // is broken, we may have enough info to get our own money!
                self.claim_funds_internal(msg.payment_preimage.clone(), false);
 
-               let monitor = {
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       match channel_state.by_id.get_mut(&msg.channel_id) {
-                               Some(chan) => {
-                                       if chan.get_their_node_id() != *their_node_id {
-                                               return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
-                                       }
-                                       chan.update_fulfill_htlc(&msg)?
-                               },
-                               None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
-                       }
-               };
-               if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-                       unimplemented!();
+               let mut channel_state = self.channel_state.lock().unwrap();
+               match channel_state.by_id.get_mut(&msg.channel_id) {
+                       Some(chan) => {
+                               if chan.get_their_node_id() != *their_node_id {
+                                       return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+                               }
+                               chan.update_fulfill_htlc(&msg)
+                       },
+                       None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
                }
-               Ok(())
        }
 
        fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, HandleError> {
@@ -1837,6 +1959,7 @@ impl ChannelMessageHandler for ChannelManager {
 
        fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
                let mut new_events = Vec::new();
+               let mut failed_channels = Vec::new();
                {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = channel_state_lock.borrow_parts();
@@ -1847,10 +1970,7 @@ impl ChannelMessageHandler for ChannelManager {
                                                if let Some(short_id) = chan.get_short_channel_id() {
                                                        short_to_id.remove(&short_id);
                                                }
-                                               let txn_to_broadcast = chan.force_shutdown();
-                                               for tx in txn_to_broadcast {
-                                                       self.tx_broadcaster.broadcast_transaction(&tx);
-                                               }
+                                               failed_channels.push(chan.force_shutdown());
                                                if let Ok(update) = self.get_channel_update(&chan) {
                                                        new_events.push(events::Event::BroadcastChannelUpdate {
                                                                msg: update
@@ -1871,6 +1991,9 @@ impl ChannelMessageHandler for ChannelManager {
                                }
                        }
                }
+               for failure in failed_channels.drain(..) {
+                       self.finish_force_close_channel(failure);
+               }
                if !new_events.is_empty() {
                        let mut pending_events = self.pending_events.lock().unwrap();
                        for event in new_events.drain(..) {
@@ -1891,6 +2014,7 @@ mod tests {
        use ln::msgs::{MsgEncodable,ChannelMessageHandler,RoutingMessageHandler};
        use util::test_utils;
        use util::events::{Event, EventsProvider};
+       use util::logger::Logger;
 
        use bitcoin::util::hash::Sha256dHash;
        use bitcoin::blockdata::block::{Block, BlockHeader};
@@ -2087,7 +2211,7 @@ mod tests {
 
        static mut CHAN_COUNT: u32 = 0;
        fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
-               node_a.node.create_channel(node_b.node.get_our_node_id(), 100000, 42).unwrap();
+               node_a.node.create_channel(node_b.node.get_our_node_id(), 100000, 10001, 42).unwrap();
 
                let events_1 = node_a.node.get_and_clear_pending_events();
                assert_eq!(events_1.len(), 1);
@@ -2416,10 +2540,9 @@ mod tests {
                                        {
                                                let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
                                                if $last_node {
-                                                       assert_eq!(added_monitors.len(), 1);
+                                                       assert_eq!(added_monitors.len(), 0);
                                                } else {
-                                                       assert_eq!(added_monitors.len(), 2);
-                                                       assert!(added_monitors[0].0 != added_monitors[1].0);
+                                                       assert_eq!(added_monitors.len(), 1);
                                                }
                                                added_monitors.clear();
                                        }
@@ -2606,10 +2729,11 @@ mod tests {
                let mut nodes = Vec::new();
                let mut rng = thread_rng();
                let secp_ctx = Secp256k1::new();
+               let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
 
                for _ in 0..node_count {
                        let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
-                       let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new());
+                       let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Arc::clone(&logger)));
                        let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
                        let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone()));
                        let node_id = {
@@ -2617,8 +2741,8 @@ mod tests {
                                rng.fill_bytes(&mut key_slice);
                                SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
                        };
-                       let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone()).unwrap();
-                       let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id).unwrap());
+                       let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger)).unwrap();
+                       let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id).unwrap(), Arc::clone(&logger));
                        nodes.push(Node { feeest, chain_monitor, tx_broadcaster, chan_monitor, node_id, node, router });
                }
 
@@ -2862,7 +2986,7 @@ mod tests {
                        let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                        nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
-                       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
+                       test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
                }
                get_announce_close_broadcast_events(&nodes, 0, 1);
                assert_eq!(nodes[0].node.list_channels().len(), 0);
@@ -2877,7 +3001,7 @@ mod tests {
                        let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
                        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                        nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
-                       assert_eq!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
+                       test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
                }
                get_announce_close_broadcast_events(&nodes, 1, 2);
                assert_eq!(nodes[1].node.list_channels().len(), 0);
@@ -2972,14 +3096,15 @@ mod tests {
                        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                        {
                                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-                               assert_eq!(node_txn.len(), 1);
+                               assert_eq!(node_txn.len(), 2);
                                assert_eq!(node_txn[0].input.len(), 1);
 
                                let mut funding_tx_map = HashMap::new();
                                funding_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone());
                                node_txn[0].verify(&funding_tx_map).unwrap();
-                               node_txn.clear();
+                               node_txn.swap_remove(0);
                        }
+                       test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
 
                        nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                        let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);