Merge pull request #977 from TheBlueMatt/2021-06-fix-double-claim-close
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Wed, 28 Jul 2021 01:24:27 +0000 (01:24 +0000)
committer GitHub <noreply@github.com>
Wed, 28 Jul 2021 01:24:27 +0000 (01:24 +0000)
Handle double-HTLC-claims without failing the backwards channel

lightning/src/chain/channelmonitor.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/onion_route_tests.rs

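The diffs below all serve one scenario: an update_fulfill_htlc that is re-delivered after a reconnect must be treated as a benign duplicate rather than a reason to fail the backwards channel. A minimal, self-contained sketch of that idea, using hypothetical types (HtlcId and Hop are illustrative only, not part of the patch):

    use std::collections::HashSet;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct HtlcId(u64);

    #[derive(Default)]
    struct Hop { pending: HashSet<HtlcId> }

    impl Hop {
        /// Returns true the first time a fulfill is seen, false on a re-delivery.
        fn on_update_fulfill(&mut self, id: HtlcId) -> bool {
            self.pending.remove(&id)
        }
    }

    fn main() {
        let mut b = Hop::default();
        b.pending.insert(HtlcId(0));
        assert!(b.on_update_fulfill(HtlcId(0)));  // first delivery: forward the claim backwards
        assert!(!b.on_update_fulfill(HtlcId(0))); // re-sent after reconnect: ignore, don't close
    }
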
index cf368f15259cad2e9f03e3cc3728eb61b80a701b,42c3e13a34feeff236a1b19712470808f0b19f69..a8ec8ee97b73f062a28492c7f472e512d119b3f9
@@@ -55,7 -55,7 +55,7 @@@ use prelude::*
  use core::{cmp, mem};
  use std::io::Error;
  use core::ops::Deref;
 -use std::sync::Mutex;
 +use sync::Mutex;
  
  /// An update generated by the underlying Channel itself which contains some new information the
  /// ChannelMonitor should be made aware of.
@@@ -114,7 -114,7 +114,7 @@@ impl Readable for ChannelMonitorUpdate 
  }
  
  /// An error enum representing a failure to persist a channel monitor update.
- #[derive(Clone, Debug)]
+ #[derive(Clone, Copy, Debug, PartialEq)]
  pub enum ChannelMonitorUpdateErr {
        /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
        /// our state failed, but is expected to succeed at some point in the future).
@@@ -2843,7 -2843,7 +2843,7 @@@ mod tests 
        use util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
        use bitcoin::secp256k1::key::{SecretKey,PublicKey};
        use bitcoin::secp256k1::Secp256k1;
 -      use std::sync::{Arc, Mutex};
 +      use sync::{Arc, Mutex};
        use chain::keysinterface::InMemorySigner;
        use prelude::*;
  
                let secp_ctx = Secp256k1::new();
                let logger = Arc::new(TestLogger::new());
                let broadcaster = Arc::new(TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
 -              let fee_estimator = Arc::new(TestFeeEstimator { sat_per_kw: 253 });
 +              let fee_estimator = Arc::new(TestFeeEstimator { sat_per_kw: Mutex::new(253) });
  
                let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
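The widened derive list on ChannelMonitorUpdateErr above (Copy, PartialEq) is what lets tests and call sites compare persister results directly. A small sketch with the enum reduced to its two documented variants:

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum ChannelMonitorUpdateErr { TemporaryFailure, PermanentFailure }

    fn should_retry(res: Result<(), ChannelMonitorUpdateErr>) -> bool {
        // PartialEq on the error type makes the whole Result comparable.
        res == Err(ChannelMonitorUpdateErr::TemporaryFailure)
    }

    fn main() {
        assert!(should_retry(Err(ChannelMonitorUpdateErr::TemporaryFailure)));
        assert!(!should_retry(Err(ChannelMonitorUpdateErr::PermanentFailure)));
    }
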
index 7c3aa4c3b1b8e3351d73a5f8783999fde27a29ad,2918b4cef13aaf2baa2c83c280aacc6c075f2fae..90519f286b6546628df70781ab85abf3ba195272
@@@ -28,7 -28,7 +28,7 @@@ use ln::msgs::{ChannelMessageHandler, E
  use routing::router::get_route;
  use util::config::UserConfig;
  use util::enforcing_trait_impls::EnforcingSigner;
 -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
  use util::errors::APIError;
  use util::ser::{ReadableArgs, Writeable};
  use util::test_utils::TestBroadcaster;
@@@ -41,7 -41,7 +41,7 @@@ use ln::functional_test_utils::*
  use util::test_utils;
  
  use prelude::*;
 -use std::sync::{Arc, Mutex};
 +use sync::{Arc, Mutex};
  
  // If persister_fail is true, we have the persister return a PermanentFailure
  // instead of the higher-level ChainMonitor.
@@@ -197,7 -197,7 +197,7 @@@ fn do_test_simple_monitor_temporary_upd
        if disconnect {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-               reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
  
        match persister_fail {
        let events_3 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_3.len(), 1);
        match events_3[0] {
 -              Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
 +              Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
                        assert_eq!(payment_hash_1, *payment_hash);
 -                      assert!(payment_preimage.is_none());
 -                      assert_eq!(payment_secret_1, *payment_secret);
                        assert_eq!(amt, 1000000);
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                      assert!(payment_preimage.is_none());
 +                                      assert_eq!(payment_secret_1, *payment_secret);
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        }
        if disconnect {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
  
        // ...and make sure we can force-close a frozen channel
@@@ -594,16 -589,11 +594,16 @@@ fn do_test_monitor_temporary_update_fai
        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
        match events_5[0] {
 -              Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
 +              Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
                        assert_eq!(payment_hash_2, *payment_hash);
 -                      assert!(payment_preimage.is_none());
 -                      assert_eq!(payment_secret_2, *payment_secret);
                        assert_eq!(amt, 1000000);
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                      assert!(payment_preimage.is_none());
 +                                      assert_eq!(payment_secret_2, *payment_secret);
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        }
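The repeated Event::PaymentReceived rewrites in this file all track the same API change: payment_preimage and payment_secret moved out of the event into a PaymentPurpose enum. A simplified sketch of the shape being matched (field types reduced to byte arrays; the real types are PaymentPreimage and PaymentSecret, and the real variant carries more fields):

    enum PaymentPurpose {
        InvoicePayment { payment_preimage: Option<[u8; 32]>, payment_secret: [u8; 32] },
        SpontaneousPayment([u8; 32]),
    }

    fn invoice_secret(purpose: &PaymentPurpose) -> Option<&[u8; 32]> {
        match purpose {
            PaymentPurpose::InvoicePayment { payment_secret, .. } => Some(payment_secret),
            PaymentPurpose::SpontaneousPayment(_) => None, // spontaneous payments have no secret
        }
    }

    fn main() {
        let p = PaymentPurpose::InvoicePayment { payment_preimage: None, payment_secret: [7; 32] };
        assert!(invoice_secret(&p).is_some());
    }
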
@@@ -714,16 -704,11 +714,16 @@@ fn test_monitor_update_fail_cs() 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
 -              Event::PaymentReceived { payment_hash, payment_preimage, payment_secret, amt, user_payment_id: _ } => {
 +              Event::PaymentReceived { payment_hash, ref purpose, amt } => {
                        assert_eq!(payment_hash, our_payment_hash);
 -                      assert!(payment_preimage.is_none());
 -                      assert_eq!(our_payment_secret, payment_secret);
                        assert_eq!(amt, 1000000);
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                      assert!(payment_preimage.is_none());
 +                                      assert_eq!(our_payment_secret, *payment_secret);
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        };
@@@ -1173,10 -1158,7 +1173,10 @@@ fn test_monitor_update_fail_reestablish
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
  
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
 -      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      assert_eq!(
 +              get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
 +                      .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 +
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
  
        assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
  
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
 +      assert_eq!(
 +              get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
 +                      .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
  
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
        check_added_monitors!(nodes[1], 0);
 -      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      assert_eq!(
 +              get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
 +                      .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
  
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
        let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@@ -1375,14 -1352,14 +1375,14 @@@ fn claim_while_disconnected_monitor_upd
        let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
  
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
 -      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
  
        // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
        // update.
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
  
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
 -      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@@ -1515,9 -1492,7 +1515,9 @@@ fn monitor_failed_no_reestablish_respon
        let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
  
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
 +      let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
 +      let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
  
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
        let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@@ -1727,30 -1702,20 +1727,30 @@@ fn test_monitor_update_fail_claim() 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
        match events[0] {
 -              Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
 +              Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
                        assert_eq!(payment_hash_2, *payment_hash);
 -                      assert!(payment_preimage.is_none());
 -                      assert_eq!(payment_secret_2, *payment_secret);
                        assert_eq!(1_000_000, amt);
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                      assert!(payment_preimage.is_none());
 +                                      assert_eq!(payment_secret_2, *payment_secret);
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        }
        match events[1] {
 -              Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
 +              Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
                        assert_eq!(payment_hash_3, *payment_hash);
 -                      assert!(payment_preimage.is_none());
 -                      assert_eq!(payment_secret_3, *payment_secret);
                        assert_eq!(1_000_000, amt);
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                      assert!(payment_preimage.is_none());
 +                                      assert_eq!(payment_secret_3, *payment_secret);
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        }
@@@ -1947,7 -1912,7 +1947,7 @@@ fn do_during_funding_monitor_fail(confi
        // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
  
@@@ -2039,7 -2004,7 +2039,7 @@@ fn test_path_paused_mpp() 
        // Pass the first HTLC of the payment along to nodes[3].
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
 -      pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), payment_secret, events.pop().unwrap(), false);
 +      pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
  
        // And check that, after we successfully update the monitor for chan_2 we can pass the second
        // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
 -      pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), payment_secret, events.pop().unwrap(), true);
 +      pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
  
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
  }
@@@ -2250,3 -2215,119 +2250,119 @@@ fn channel_holding_cell_serialize() 
        do_channel_holding_cell_serialize(true, false);
        do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
  }
+ #[derive(PartialEq)]
+ enum HTLCStatusAtDupClaim {
+       Received,
+       HoldingCell,
+       Cleared,
+ }
+ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
+       // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
+       // along the payment path before waiting for a full commitment_signed dance. This is great, but
+       // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
+       // reconnects, and then re-sends its update_fulfill_htlc message.
+       // In previous code, we didn't handle the double-claim correctly, spuriously closing the
+       // channel on which the inbound HTLC was received.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+       let mut as_raa = None;
+       if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+               // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
+               // awaiting a remote revoke_and_ack from nodes[0].
+               let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[1]);
+               let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
+                       &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
+               nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
+               check_added_monitors!(nodes[1], 1);
+               let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+               check_added_monitors!(nodes[0], 1);
+               nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+               check_added_monitors!(nodes[0], 1);
+               as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+       }
+       let fulfill_msg = msgs::UpdateFulfillHTLC {
+               channel_id: chan_2,
+               htlc_id: 0,
+               payment_preimage,
+       };
+       if second_fails {
+               assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
+               expect_pending_htlcs_forwardable!(nodes[2]);
+               check_added_monitors!(nodes[2], 1);
+               get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+       } else {
+               assert!(nodes[2].node.claim_funds(payment_preimage));
+               check_added_monitors!(nodes[2], 1);
+               let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+               assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
+               // Check that the message we're about to deliver matches the one generated:
+               assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
+       }
+       nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
+       check_added_monitors!(nodes[1], 1);
+       let mut bs_updates = None;
+       if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
+               bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+               assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+               expect_payment_sent!(nodes[0], payment_preimage);
+               if htlc_status == HTLCStatusAtDupClaim::Cleared {
+                       commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+               }
+       } else {
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       }
+       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       if second_fails {
+               reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+               expect_pending_htlcs_forwardable!(nodes[1]);
+       } else {
+               reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       }
+       if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+               nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
+               check_added_monitors!(nodes[1], 1);
+               expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
+               bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+               assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+               expect_payment_sent!(nodes[0], payment_preimage);
+       }
+       if htlc_status != HTLCStatusAtDupClaim::Cleared {
+               commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+       }
+ }
+ #[test]
+ fn test_reconnect_dup_htlc_claims() {
+       do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
+       do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
+       do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
+       do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
+       do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
+       do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
+ }
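The HoldingCell case above depends on update messages being queued while we await the peer's revoke_and_ack. A rough sketch of that queueing discipline (assumed and heavily simplified; the real holding cell lives in Channel):

    enum PendingUpdate { ClaimHtlc { htlc_id: u64 } }

    struct ChannelSketch {
        awaiting_remote_raa: bool,
        holding_cell: Vec<PendingUpdate>,
    }

    impl ChannelSketch {
        /// Returns the update to send now, or None if it was parked in the holding cell.
        fn claim_htlc(&mut self, htlc_id: u64) -> Option<PendingUpdate> {
            if self.awaiting_remote_raa {
                // Can't add updates to the commitment mid-dance; replayed on RAA receipt.
                self.holding_cell.push(PendingUpdate::ClaimHtlc { htlc_id });
                None
            } else {
                Some(PendingUpdate::ClaimHtlc { htlc_id })
            }
        }
    }

    fn main() {
        let mut chan = ChannelSketch { awaiting_remote_raa: true, holding_cell: Vec::new() };
        assert!(chan.claim_htlc(0).is_none()); // parked, as in the as_raa path above
        assert_eq!(chan.holding_cell.len(), 1);
    }
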
index dc2dd758a9ea97bae465a69836f79200190c97d7,5a0ceabed531159182a059c3b6e7f8bfa29556b2..e08b8af49a3a40f5992f8e09b77f99f8279135fd
@@@ -44,8 -44,8 +44,8 @@@ use util::scid_utils::scid_from_parts
  use prelude::*;
  use core::{cmp,mem,fmt};
  use core::ops::Deref;
 -#[cfg(any(test, feature = "fuzztarget"))]
 -use std::sync::Mutex;
 +#[cfg(any(test, feature = "fuzztarget", debug_assertions))]
 +use sync::Mutex;
  use bitcoin::hashes::hex::ToHex;
  use bitcoin::blockdata::opcodes::all::OP_PUSHBYTES_0;
  
@@@ -301,6 -301,33 +301,33 @@@ pub struct CounterpartyForwardingInfo 
        pub cltv_expiry_delta: u16,
  }
  
+ /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
+ /// a description.
+ enum UpdateFulfillFetch {
+       NewClaim {
+               monitor_update: ChannelMonitorUpdate,
+               msg: Option<msgs::UpdateFulfillHTLC>,
+       },
+       DuplicateClaim {},
+ }
+ /// The return type of get_update_fulfill_htlc_and_commit.
+ pub enum UpdateFulfillCommitFetch {
+       /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
+       /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
+       /// previously placed in the holding cell (and has since been removed).
+       NewClaim {
+               /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
+               monitor_update: ChannelMonitorUpdate,
+               /// The update_fulfill message and commitment_signed message (if the claim was not placed
+               /// in the holding cell).
+               msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
+       },
+       /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
+       /// or has been forgotten (presumably previously claimed).
+       DuplicateClaim {},
+ }
  // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
  // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
  // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
@@@ -374,10 -401,10 +401,10 @@@ pub(super) struct Channel<Signer: Sign
  
        #[cfg(debug_assertions)]
        /// Max to_local and to_remote outputs in a locally-generated commitment transaction
 -      holder_max_commitment_tx_output: ::std::sync::Mutex<(u64, u64)>,
 +      holder_max_commitment_tx_output: Mutex<(u64, u64)>,
        #[cfg(debug_assertions)]
        /// Max to_local and to_remote outputs in a remote-generated commitment transaction
 -      counterparty_max_commitment_tx_output: ::std::sync::Mutex<(u64, u64)>,
 +      counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
  
        last_sent_closing_fee: Option<(u32, u64, Signature)>, // (feerate, fee, holder_sig)
  
        ///
        /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
        pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
+       #[cfg(any(test, feature = "fuzztarget"))]
+       // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
+       // corresponding HTLC on the inbound path. If, then, the outbound path channel is
+       // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
+       // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
+       // is fine, but as a sanity check, when we fail to generate the second claim we check here
+       // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
+       historical_inbound_htlc_fulfills: HashSet<u64>,
  }
  
  #[cfg(any(test, feature = "fuzztarget"))]
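A compact sketch of how the test-only historical_inbound_htlc_fulfills set above is used by the hunks that follow (simplified stand-in type; the real checks are the cfg-gated debug_assert!s below):

    use std::collections::HashSet;

    struct DupTracker { historical_inbound_htlc_fulfills: HashSet<u64> }

    impl DupTracker {
        fn record_fulfill(&mut self, htlc_id: u64) {
            // Inserted whenever a claim is generated or parked in the holding cell.
            self.historical_inbound_htlc_fulfills.insert(htlc_id);
        }
        fn check_benign_duplicate(&self, htlc_id: u64) {
            // If an HTLC to resolve can no longer be found, it must have been
            // fulfilled before; anything else means a lost or double-failed HTLC.
            debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id));
        }
    }

    fn main() {
        let mut t = DupTracker { historical_inbound_htlc_fulfills: HashSet::new() };
        t.record_fulfill(0);
        t.check_benign_duplicate(0); // duplicate claim after reconnect: fine
    }
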
@@@ -456,6 -492,7 +492,6 @@@ struct CommitmentTxInfoCached 
  }
  
  pub const OUR_MAX_HTLCS: u16 = 50; //TODO
 -const SPENDING_INPUT_FOR_A_OUTPUT_WEIGHT: u64 = 79; // prevout: 36, nSequence: 4, script len: 1, witness lengths: (3+1)/4, sig: 73/4, if-selector: 1, redeemScript: (6 ops + 2*33 pubkeys + 1*2 delay)/4
  
  #[cfg(not(test))]
  const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
@@@ -595,9 -632,9 +631,9 @@@ impl<Signer: Sign> Channel<Signer> 
                        monitor_pending_failures: Vec::new(),
  
                        #[cfg(debug_assertions)]
 -                      holder_max_commitment_tx_output: ::std::sync::Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
 +                      holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
                        #[cfg(debug_assertions)]
 -                      counterparty_max_commitment_tx_output: ::std::sync::Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
 +                      counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
  
                        last_sent_closing_fee: None,
  
                        next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
  
                        workaround_lnd_bug_4006: None,
+                       #[cfg(any(test, feature = "fuzztarget"))]
+                       historical_inbound_htlc_fulfills: HashSet::new(),
                })
        }
  
                        monitor_pending_failures: Vec::new(),
  
                        #[cfg(debug_assertions)]
 -                      holder_max_commitment_tx_output: ::std::sync::Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
 +                      holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
                        #[cfg(debug_assertions)]
 -                      counterparty_max_commitment_tx_output: ::std::sync::Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
 +                      counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
  
                        last_sent_closing_fee: None,
  
                        next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
  
                        workaround_lnd_bug_4006: None,
+                       #[cfg(any(test, feature = "fuzztarget"))]
+                       historical_inbound_htlc_fulfills: HashSet::new(),
                };
  
                Ok(chan)
                make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
        }
  
-       /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-       /// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-       /// return Ok(_) if debug assertions are turned on or preconditions are met.
-       ///
-       /// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-       /// but then have a reorg which settles on an HTLC-failure on chain.
-       fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
+       fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
                // Either ChannelFunded got set (which means it won't be unset) or there is no way any
                // caller thought we could have something claimed (cause we wouldn't have accepted in an
                // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
                                                if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
                                                } else {
                                                        log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+                                                       debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
                                                }
-                                               debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
-                                               return Ok((None, None));
+                                               return UpdateFulfillFetch::DuplicateClaim {};
                                        },
                                        _ => {
                                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
                        }
                }
                if pending_idx == core::usize::MAX {
-                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+                       #[cfg(any(test, feature = "fuzztarget"))]
+                       // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+                       // this is simply a duplicate claim, not previously failed and we lost funds.
+                       debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+                       return UpdateFulfillFetch::DuplicateClaim {};
                }
  
                // Now update local state:
                                                if htlc_id_arg == htlc_id {
                                                        // Make sure we don't leave latest_monitor_update_id incremented here:
                                                        self.latest_monitor_update_id -= 1;
-                                                       debug_assert!(false, "Tried to fulfill an HTLC that was already fulfilled");
-                                                       return Ok((None, None));
+                                                       #[cfg(any(test, feature = "fuzztarget"))]
+                                                       debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+                                                       return UpdateFulfillFetch::DuplicateClaim {};
                                                }
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
                                                        // TODO: We may actually be able to switch to a fulfill here, though its
                                                        // rare enough it may not be worth the complexity burden.
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
-                                                       return Ok((None, Some(monitor_update)));
+                                                       return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
                                                }
                                        },
                                        _ => {}
                        self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
                                payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
                        });
-                       return Ok((None, Some(monitor_update)));
+                       #[cfg(any(test, feature = "fuzztarget"))]
+                       self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+                       return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
                }
+               #[cfg(any(test, feature = "fuzztarget"))]
+               self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
  
                {
                        let htlc = &mut self.pending_inbound_htlcs[pending_idx];
                        if let InboundHTLCState::Committed = htlc.state {
                        } else {
                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-                               return Ok((None, Some(monitor_update)));
+                               return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
                        }
                        log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
                }
  
-               Ok((Some(msgs::UpdateFulfillHTLC {
-                       channel_id: self.channel_id(),
-                       htlc_id: htlc_id_arg,
-                       payment_preimage: payment_preimage_arg,
-               }), Some(monitor_update)))
+               UpdateFulfillFetch::NewClaim {
+                       monitor_update,
+                       msg: Some(msgs::UpdateFulfillHTLC {
+                               channel_id: self.channel_id(),
+                               htlc_id: htlc_id_arg,
+                               payment_preimage: payment_preimage_arg,
+                       }),
+               }
        }
  
-       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
-               match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger)? {
-                       (Some(update_fulfill_htlc), Some(mut monitor_update)) => {
-                               let (commitment, mut additional_update) = self.send_commitment_no_status_check(logger)?;
+       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
+               match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+                       UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
+                               let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
+                                       Err(e) => return Err((e, monitor_update)),
+                                       Ok(res) => res
+                               };
                                // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
-                               Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
-                       },
-                       (Some(update_fulfill_htlc), None) => {
-                               let (commitment, monitor_update) = self.send_commitment_no_status_check(logger)?;
-                               Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
+                               Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill_htlc, commitment)) })
                        },
-                       (None, Some(monitor_update)) => Ok((None, Some(monitor_update))),
-                       (None, None) => Ok((None, None))
+                       UpdateFulfillFetch::NewClaim { monitor_update, msg: None } => Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }),
+                       UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
                }
        }
  
-       /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-       /// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-       /// return Ok(_) if debug assertions are turned on or preconditions are met.
-       ///
-       /// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-       /// but then have a reorg which settles on an HTLC-failure on chain.
+       /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+       /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+       /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+       /// before we fail backwards.
+       /// If we do fail twice, we debug_assert!(false) and return Ok(None). Thus, this will always return
+       /// Ok(_) if debug assertions are turned on or preconditions are met.
        pub fn get_update_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        panic!("Was asked to fail an HTLC when channel was not in an operational state");
                        if htlc.htlc_id == htlc_id_arg {
                                match htlc.state {
                                        InboundHTLCState::Committed => {},
-                                       InboundHTLCState::LocalRemoved(_) => {
-                                               debug_assert!(false, "Tried to fail an HTLC that was already fail/fulfilled");
+                                       InboundHTLCState::LocalRemoved(ref reason) => {
+                                               if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+                                               } else {
+                                                       debug_assert!(false, "Tried to fail an HTLC that was already failed");
+                                               }
                                                return Ok(None);
                                        },
                                        _ => {
                        }
                }
                if pending_idx == core::usize::MAX {
-                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+                       #[cfg(any(test, feature = "fuzztarget"))]
+                       // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+                       // is simply a duplicate fail, not previously failed and we failed-back too early.
+                       debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+                       return Ok(None);
                }
  
                // Now update local state:
                                match pending_update {
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
                                                if htlc_id_arg == htlc_id {
-                                                       debug_assert!(false, "Tried to fail an HTLC that was already fulfilled");
-                                                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+                                                       #[cfg(any(test, feature = "fuzztarget"))]
+                                                       debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+                                                       return Ok(None);
                                                }
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
                                                }
                                        },
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
-                                               match self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
-                                                       Ok((update_fulfill_msg_option, additional_monitor_update_opt)) => {
-                                                               update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
-                                                               if let Some(mut additional_monitor_update) = additional_monitor_update_opt {
-                                                                       monitor_update.updates.append(&mut additional_monitor_update.updates);
-                                                               }
-                                                       },
-                                                       Err(e) => {
-                                                               if let ChannelError::Ignore(_) = e {}
-                                                               else {
-                                                                       panic!("Got a non-IgnoreError action trying to fulfill holding cell HTLC");
-                                                               }
-                                                       }
-                                               }
+                                               // If an HTLC claim was previously added to the holding cell (via
+                                               // `get_update_fulfill_htlc`), then generating the claim message itself must
+                                               // not fail - any in between attempts to claim the HTLC will have resulted
+                                               // in it hitting the holding cell again and we cannot change the state of a
+                                               // holding cell HTLC from fulfill to anything else.
+                                               let (update_fulfill_msg_option, mut additional_monitor_update) =
+                                                       if let UpdateFulfillFetch::NewClaim { msg, monitor_update } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+                                                               (msg, monitor_update)
+                                                       } else { unreachable!() };
+                                               update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+                                               monitor_update.updates.append(&mut additional_monitor_update.updates);
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
                                                match self.get_update_fail_htlc(htlc_id, err_packet.clone(), logger) {
-                                                       Ok(update_fail_msg_option) => update_fail_htlcs.push(update_fail_msg_option.unwrap()),
+                                                       Ok(update_fail_msg_option) => {
+                                                               // If an HTLC failure was previously added to the holding cell (via
+                                                               // `get_update_fail_htlc`) then generating the fail message itself
+                                                               // must not fail - we should never end up in a state where we
+                                                               // double-fail an HTLC or fail-then-claim an HTLC as it indicates
+                                                               // we didn't wait for a full revocation before failing.
+                                                               update_fail_htlcs.push(update_fail_msg_option.unwrap())
+                                                       },
                                                        Err(e) => {
                                                                if let ChannelError::Ignore(_) = e {}
                                                                else {
        }
  
        pub fn get_fee_proportional_millionths(&self) -> u32 {
 -              self.config.fee_proportional_millionths
 +              self.config.forwarding_fee_proportional_millionths
        }
  
        pub fn get_cltv_expiry_delta(&self) -> u16 {
  
        /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
        /// Allowed in any state (including after shutdown)
 -      pub fn get_holder_fee_base_msat<F: Deref>(&self, fee_estimator: &F) -> u32
 -              where F::Target: FeeEstimator
 -      {
 -              // For lack of a better metric, we calculate what it would cost to consolidate the new HTLC
 -              // output value back into a transaction with the regular channel output:
 -
 -              // the fee cost of the HTLC-Success/HTLC-Timeout transaction:
 -              let mut res = self.feerate_per_kw as u64 * cmp::max(HTLC_TIMEOUT_TX_WEIGHT, HTLC_SUCCESS_TX_WEIGHT) / 1000;
 -
 -              if self.is_outbound() {
 -                      // + the marginal fee increase cost to us in the commitment transaction:
 -                      res += self.feerate_per_kw as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC / 1000;
 -              }
 -
 -              // + the marginal cost of an input which spends the HTLC-Success/HTLC-Timeout output:
 -              res += fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal) as u64 * SPENDING_INPUT_FOR_A_OUTPUT_WEIGHT / 1000;
 -
 -              res as u32
 +      pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
 +              self.config.forwarding_fee_base_msat
        }
  
        /// Returns true if we've ever received a message from the remote end for this Channel
                if !self.is_outbound() {
                        // Check that we won't violate the remote channel reserve by adding this HTLC.
                        let counterparty_balance_msat = self.channel_value_satoshis * 1000 - self.value_to_self_msat;
 -                      let holder_selected_chan_reserve_msat = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
 +                      let holder_selected_chan_reserve_msat = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) * 1000;
                        let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
                        let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_candidate, None);
                        if counterparty_balance_msat < holder_selected_chan_reserve_msat + counterparty_commit_tx_fee_msat {
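The reworked get_update_fulfill_htlc_and_commit above changes its error type so that a failed commitment build still hands the preimage-bearing monitor update back to the caller. A sketch of that shape with placeholder types (MonitorUpdate and ChannelError are stand-ins):

    struct MonitorUpdate { updates: Vec<&'static str> }
    struct ChannelError(&'static str);

    fn fulfill_and_commit(commit_fails: bool) -> Result<MonitorUpdate, (ChannelError, MonitorUpdate)> {
        // The preimage update is built first and must never be dropped.
        let monitor_update = MonitorUpdate { updates: vec!["payment-preimage"] };
        if commit_fails {
            // Signing the new commitment failed: return the update alongside the
            // error so the caller can still persist the preimage.
            return Err((ChannelError("commitment signing failed"), monitor_update));
        }
        Ok(monitor_update)
    }

    fn main() {
        if let Err((_e, update)) = fulfill_and_commit(true) {
            assert_eq!(update.updates, vec!["payment-preimage"]); // preimage preserved
        }
    }
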
@@@ -4443,7 -4519,7 +4502,7 @@@ fn is_unsupported_shutdown_script(their
        return !script.is_p2pkh() && !script.is_p2sh() && !script.is_v0_p2wpkh() && !script.is_v0_p2wsh()
  }
  
 -const SERIALIZATION_VERSION: u8 = 1;
 +const SERIALIZATION_VERSION: u8 = 2;
  const MIN_SERIALIZATION_VERSION: u8 = 1;
  
  impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
@@@ -4485,13 -4561,7 +4544,13 @@@ impl<Signer: Sign> Writeable for Channe
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
  
                self.user_id.write(writer)?;
 -              self.config.write(writer)?;
 +
 +              // Write out the old serialization for the config object. This is read by version-1
 +              // deserializers, but we will read the version in the TLV at the end instead.
 +              self.config.forwarding_fee_proportional_millionths.write(writer)?;
 +              self.config.cltv_expiry_delta.write(writer)?;
 +              self.config.announced_channel.write(writer)?;
 +              self.config.commit_upfront_shutdown_pubkey.write(writer)?;
  
                self.channel_id.write(writer)?;
                (self.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
                self.counterparty_dust_limit_satoshis.write(writer)?;
                self.holder_dust_limit_satoshis.write(writer)?;
                self.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
 +
 +              // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
                self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
 +
                self.counterparty_htlc_minimum_msat.write(writer)?;
                self.holder_htlc_minimum_msat.write(writer)?;
                self.counterparty_max_accepted_htlcs.write(writer)?;
 +
 +              // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
                self.minimum_depth.unwrap_or(0).write(writer)?;
  
                match &self.counterparty_forwarding_info {
  
                self.channel_update_status.write(writer)?;
  
+               #[cfg(any(test, feature = "fuzztarget"))]
+               (self.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
+               #[cfg(any(test, feature = "fuzztarget"))]
+               for htlc in self.historical_inbound_htlc_fulfills.iter() {
+                       htlc.write(writer)?;
+               }
                write_tlv_fields!(writer, {
                        (0, self.announcement_sigs, option),
                        // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
                        // override that.
                        (1, self.minimum_depth, option),
                        (3, self.counterparty_selected_channel_reserve_satoshis, option),
 +                      (5, self.config, required),
                });
  
                Ok(())
@@@ -4705,21 -4776,10 +4771,21 @@@ const MAX_ALLOC_SIZE: usize = 64*1024
  impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
                where K::Target: KeysInterface<Signer = Signer> {
        fn read<R : ::std::io::Read>(reader: &mut R, keys_source: &'a K) -> Result<Self, DecodeError> {
 -              let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 +              let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
  
                let user_id = Readable::read(reader)?;
 -              let config: ChannelConfig = Readable::read(reader)?;
 +
 +              let mut config = Some(ChannelConfig::default());
 +              if ver == 1 {
 +                      // Read the old serialization of the ChannelConfig from version 0.0.98.
 +                      config.as_mut().unwrap().forwarding_fee_proportional_millionths = Readable::read(reader)?;
 +                      config.as_mut().unwrap().cltv_expiry_delta = Readable::read(reader)?;
 +                      config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
 +                      config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
 +              } else {
 +                      // Read the 8 bytes of backwards-compatibility ChannelConfig data.
 +                      let mut _val: u64 = Readable::read(reader)?;
 +              }
  
                let channel_id = Readable::read(reader)?;
                let channel_state = Readable::read(reader)?;
                let counterparty_dust_limit_satoshis = Readable::read(reader)?;
                let holder_dust_limit_satoshis = Readable::read(reader)?;
                let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
 -              let mut counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
 -              if counterparty_selected_channel_reserve_satoshis == Some(0) {
 -                      // Versions up to 0.0.98 had counterparty_selected_channel_reserve_satoshis as a
 -                      // non-option, writing 0 for what we now consider None.
 -                      counterparty_selected_channel_reserve_satoshis = None;
 +              let mut counterparty_selected_channel_reserve_satoshis = None;
 +              if ver == 1 {
 +                      // Read the old serialization from version 0.0.98.
 +                      counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
 +              } else {
 +                      // Read the 8 bytes of backwards-compatibility data.
 +                      let _dummy: u64 = Readable::read(reader)?;
                }
                let counterparty_htlc_minimum_msat = Readable::read(reader)?;
                let holder_htlc_minimum_msat = Readable::read(reader)?;
                let counterparty_max_accepted_htlcs = Readable::read(reader)?;
 -              let mut minimum_depth = Some(Readable::read(reader)?);
 -              if minimum_depth == Some(0) {
 -                      // Versions up to 0.0.98 had minimum_depth as a non-option, writing 0 for what we now
 -                      // consider None.
 -                      minimum_depth = None;
 +
 +              let mut minimum_depth = None;
 +              if ver == 1 {
 +                      // Read the old serialization from version 0.0.98.
 +                      minimum_depth = Some(Readable::read(reader)?);
 +              } else {
 +                      // Read the 4 bytes of backwards-compatibility data.
 +                      let _dummy: u32 = Readable::read(reader)?;
                }
  
                let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
  
                let channel_update_status = Readable::read(reader)?;
  
+               #[cfg(any(test, feature = "fuzztarget"))]
+               let mut historical_inbound_htlc_fulfills = HashSet::new();
+               #[cfg(any(test, feature = "fuzztarget"))]
+               {
+                       let htlc_fulfills_len: u64 = Readable::read(reader)?;
+                       for _ in 0..htlc_fulfills_len {
+                               assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
+                       }
+               }
                let mut announcement_sigs = None;
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (1, minimum_depth, option),
                        (3, counterparty_selected_channel_reserve_satoshis, option),
 +                      (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
                });
  
                let mut secp_ctx = Secp256k1::new();
                Ok(Channel {
                        user_id,
  
 -                      config,
 +                      config: config.unwrap(),
                        channel_id,
                        channel_state,
                        secp_ctx,
                        feerate_per_kw,
  
                        #[cfg(debug_assertions)]
 -                      holder_max_commitment_tx_output: ::std::sync::Mutex::new((0, 0)),
 +                      holder_max_commitment_tx_output: Mutex::new((0, 0)),
                        #[cfg(debug_assertions)]
 -                      counterparty_max_commitment_tx_output: ::std::sync::Mutex::new((0, 0)),
 +                      counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
  
                        last_sent_closing_fee,
  
                        next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
  
                        workaround_lnd_bug_4006: None,
+                       #[cfg(any(test, feature = "fuzztarget"))]
+                       historical_inbound_htlc_fulfills,
                })
        }
  }
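
The read path above mirrors the write path: the `read_ver_prefix!` version gates whether the legacy inline fields (0.0.98's `ChannelConfig`, `counterparty_selected_channel_reserve_satoshis`, and `minimum_depth`) are decoded or skipped as dummy bytes, with the real values then arriving via the odd-typed TLVs, and TLV type 5 deliberately read as `option` so an absent config does not overwrite the default. A compressed sketch of one such field, with a hypothetical `read_reserve` helper over a plain big-endian stream:

    use std::io::{self, Read};

    // Version-gated read: legacy (ver == 1) streams carry the value inline;
    // newer streams carry 8 placeholder bytes and move the real value (if
    // any) into an optional TLV suffix that is read later.
    fn read_reserve<R: Read>(r: &mut R, ver: u8) -> io::Result<Option<u64>> {
        let mut buf = [0u8; 8];
        r.read_exact(&mut buf)?;
        Ok(if ver == 1 { Some(u64::from_be_bytes(buf)) } else { None })
    }
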
@@@ -5023,7 -5090,7 +5102,7 @@@ mod tests 
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::hashes::Hash;
        use bitcoin::hash_types::{Txid, WPubkeyHash};
 -      use std::sync::Arc;
 +      use sync::Arc;
        use prelude::*;
  
        struct TestFeeEstimator {
index 1fbdde870095576edba2505addfcb91f6d0f9077,482c16356314df373e8ea7ee5645f5febbf175d9..6ebe802b50c8e4749c84772bd2c73d12df44dbdd
@@@ -44,7 -44,7 +44,7 @@@ use chain::transaction::{OutPoint, Tran
  // construct one themselves.
  use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
  pub use ln::channel::CounterpartyForwardingInfo;
- use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
+ use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
  use ln::features::{InitFeatures, NodeFeatures};
  use routing::router::{Route, RouteHop};
  use ln::msgs;
@@@ -57,14 -57,14 +57,14 @@@ use util::events::{EventHandler, Events
  use util::{byte_utils, events};
  use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
  use util::chacha20::{ChaCha20, ChaChaReader};
- use util::logger::Logger;
+ use util::logger::{Logger, Level};
  use util::errors::APIError;
  
  use prelude::*;
  use core::{cmp, mem};
  use core::cell::RefCell;
  use std::io::{Cursor, Read};
 -use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
 +use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
  use core::sync::atomic::{AtomicUsize, Ordering};
  use core::time::Duration;
  #[cfg(any(test, feature = "allow_wallclock_use"))]
@@@ -99,10 -99,6 +99,10 @@@ enum PendingHTLCRouting 
                payment_data: msgs::FinalOnionHopData,
                incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
        },
 +      ReceiveKeysend {
 +              payment_preimage: PaymentPreimage,
 +              incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
 +      },
  }
  
  #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
@@@ -157,20 -153,14 +157,20 @@@ pub(crate) struct HTLCPreviousHopData 
        outpoint: OutPoint,
  }
  
 -struct ClaimableHTLC {
 -      prev_hop: HTLCPreviousHopData,
 -      value: u64,
 +enum OnionPayload {
        /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
        /// payment_secret which prevents path-probing attacks and can associate different HTLCs which
        /// are part of the same payment.
 -      payment_data: msgs::FinalOnionHopData,
 +      Invoice(msgs::FinalOnionHopData),
 +      /// Contains the payer-provided preimage.
 +      Spontaneous(PaymentPreimage),
 +}
 +
 +struct ClaimableHTLC {
 +      prev_hop: HTLCPreviousHopData,
        cltv_expiry: u32,
 +      value: u64,
 +      onion_payload: OnionPayload,
  }
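
Splitting `payment_data` out into the `OnionPayload` enum lets the receive path dispatch on how the HTLC can be claimed. A simplified sketch of that dispatch, with stand-in types rather than the real `msgs::FinalOnionHopData`:

    // Stand-ins for the types above, just to show the shape of the dispatch.
    struct FinalOnionHopData { total_msat: u64 }
    struct PaymentPreimage(pub [u8; 32]);

    enum OnionPayload {
        Invoice(FinalOnionHopData),   // invoice payment: validate payment_secret/total_msat
        Spontaneous(PaymentPreimage), // keysend: the payer supplied the preimage
    }

    fn claim_strategy(payload: &OnionPayload) -> &'static str {
        match payload {
            OnionPayload::Invoice(_) => "match against a registered inbound payment",
            OnionPayload::Spontaneous(_) => "claimable immediately; preimage is known",
        }
    }
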
  
  /// Tracks the inbound corresponding to an outbound HTLC
@@@ -612,29 -602,6 +612,29 @@@ const CHECK_CLTV_EXPIRY_SANITY: u32 = M
  #[allow(dead_code)]
  const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
  
 +/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
 +/// to better separate the fields which describe our counterparty from those which describe the
 +/// channel itself.
 +#[derive(Clone, Debug, PartialEq)]
 +pub struct ChannelCounterparty {
 +      /// The node_id of our counterparty
 +      pub node_id: PublicKey,
 +      /// The Features the channel counterparty provided upon last connection.
 +      /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
 +      /// many routing-relevant features are present in the init context.
 +      pub features: InitFeatures,
 +      /// The value, in satoshis, that must always be held in the channel for our counterparty. This
 +      /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
 +      /// claiming at least this value on chain.
 +      ///
 +      /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
 +      ///
 +      /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
 +      pub unspendable_punishment_reserve: u64,
 +      /// Information on the fees and requirements that the counterparty requires when forwarding
 +      /// payments to us through this channel.
 +      pub forwarding_info: Option<CounterpartyForwardingInfo>,
 +}
 +
  /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
  #[derive(Clone, Debug, PartialEq)]
  pub struct ChannelDetails {
        /// Note that this means this value is *not* persistent - it can change once during the
        /// lifetime of the channel.
        pub channel_id: [u8; 32],
 +      /// Parameters which apply to our counterparty. See individual fields for more information.
 +      pub counterparty: ChannelCounterparty,
        /// The Channel's funding transaction output, if we've negotiated the funding transaction with
        /// our counterparty already.
        ///
        /// The position of the funding transaction in the chain. None if the funding transaction has
        /// not yet been confirmed and the channel fully opened.
        pub short_channel_id: Option<u64>,
 -      /// The node_id of our counterparty
 -      pub remote_network_id: PublicKey,
 -      /// The Features the channel counterparty provided upon last connection.
 -      /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
 -      /// many routing-relevant features are present in the init context.
 -      pub counterparty_features: InitFeatures,
        /// The value, in satoshis, of this channel as appears in the funding output
        pub channel_value_satoshis: u64,
        /// The value, in satoshis, that must always be held in the channel for us. This value ensures
        /// This value will be `None` for outbound channels until the counterparty accepts the channel.
        ///
        /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
 -      pub to_self_reserve_satoshis: Option<u64>,
 -      /// The value, in satoshis, that must always be held in the channel for our counterparty. This
 -      /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
 -      /// claiming at least this value on chain.
 -      ///
 -      /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
 -      ///
 -      /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
 -      pub to_remote_reserve_satoshis: u64,
 +      pub unspendable_punishment_reserve: Option<u64>,
        /// The user_id passed in to create_channel, or 0 if the channel was inbound.
        pub user_id: u64,
        /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
        /// time to claim our non-HTLC-encumbered funds.
        ///
        /// This value will be `None` for outbound channels until the counterparty accepts the channel.
 -      pub spend_csv_on_our_commitment_funds: Option<u16>,
 +      pub force_close_spend_delay: Option<u16>,
        /// True if the channel was initiated (and thus funded) by us.
        pub is_outbound: bool,
        /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
        pub is_usable: bool,
        /// True if this channel is (or will be) publicly-announced.
        pub is_public: bool,
 -      /// Information on the fees and requirements that the counterparty requires when forwarding
 -      /// payments to us through this channel.
 -      pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
  }
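
For API consumers, the net effect of the struct changes above is that counterparty-specific fields move under `ChannelDetails::counterparty` and several fields are renamed. A hedged migration sketch (the `counterparty_summary` helper is hypothetical; the types are those defined above):

    use bitcoin::secp256k1::key::PublicKey;

    // New field paths, with the pre-0.0.99 names noted inline.
    fn counterparty_summary(details: &ChannelDetails) -> (PublicKey, u64, Option<u16>) {
        (
            details.counterparty.node_id,                        // was remote_network_id
            details.counterparty.unspendable_punishment_reserve, // was to_remote_reserve_satoshis
            details.force_close_spend_delay,                     // was spend_csv_on_our_commitment_funds
        )
    }
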
  
  /// If a payment fails to send, it can be in one of several states. This enum is returned as the
@@@ -818,7 -800,7 +818,7 @@@ macro_rules! convert_chan_err 
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(true);
 -                              (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
 +                              (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
                        ChannelError::CloseDelayBroadcast(msg) => {
                                log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(false);
 -                              (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
 +                              (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        }
                }
        }
@@@ -882,8 -864,7 +882,8 @@@ macro_rules! handle_monitor_err 
                                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
 -                              let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok()));
 +                              let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id,
 +                                              $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
                                (res, true)
                        },
                        ChannelMonitorUpdateErr::TemporaryFailure => {
@@@ -1147,10 -1128,6 +1147,10 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
        ///
        /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
        /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
 +      ///
 +      /// Note that we do not check if you are currently connected to the given peer. If no
 +      /// connection is available, the outbound `open_channel` message may fail to send, resulting in
 +      /// the channel eventually being silently forgotten.
        pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> {
                if channel_value_satoshis < 1000 {
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                                        channel.get_holder_counterparty_selected_channel_reserve_satoshis();
                                res.push(ChannelDetails {
                                        channel_id: (*channel_id).clone(),
 +                                      counterparty: ChannelCounterparty {
 +                                              node_id: channel.get_counterparty_node_id(),
 +                                              features: InitFeatures::empty(),
 +                                              unspendable_punishment_reserve: to_remote_reserve_satoshis,
 +                                              forwarding_info: channel.counterparty_forwarding_info(),
 +                                      },
                                        funding_txo: channel.get_funding_txo(),
                                        short_channel_id: channel.get_short_channel_id(),
 -                                      remote_network_id: channel.get_counterparty_node_id(),
 -                                      counterparty_features: InitFeatures::empty(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
 -                                      to_self_reserve_satoshis,
 -                                      to_remote_reserve_satoshis,
 +                                      unspendable_punishment_reserve: to_self_reserve_satoshis,
                                        inbound_capacity_msat,
                                        outbound_capacity_msat,
                                        user_id: channel.get_user_id(),
                                        confirmations_required: channel.minimum_depth(),
 -                                      spend_csv_on_our_commitment_funds: channel.get_counterparty_selected_contest_delay(),
 +                                      force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
                                        is_outbound: channel.is_outbound(),
                                        is_funding_locked: channel.is_usable(),
                                        is_usable: channel.is_live(),
                                        is_public: channel.should_announce(),
 -                                      counterparty_forwarding_info: channel.counterparty_forwarding_info(),
                                });
                        }
                }
                let per_peer_state = self.per_peer_state.read().unwrap();
                for chan in res.iter_mut() {
 -                      if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) {
 -                              chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone();
 +                      if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) {
 +                              chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone();
                        }
                }
                res
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                let chan_update = if let Some(chan) = chan_option {
 -                      if let Ok(update) = self.get_channel_update(&chan) {
 -                              Some(update)
 -                      } else { None }
 +                      self.get_channel_update_for_broadcast(&chan).ok()
                } else { None };
  
                if let Some(update) = chan_update {
                };
                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
                self.finish_force_close_channel(chan.force_shutdown(true));
 -              if let Ok(update) = self.get_channel_update(&chan) {
 +              if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                msg: update
                };
  
                let pending_forward_info = if next_hop_hmac == [0; 32] {
 -                              #[cfg(test)]
 -                              {
 -                                      // In tests, make sure that the initial onion pcket data is, at least, non-0.
 -                                      // We could do some fancy randomness test here, but, ehh, whatever.
 -                                      // This checks for the issue where you can calculate the path length given the
 -                                      // onion data as all the path entries that the originator sent will be here
 -                                      // as-is (and were originally 0s).
 -                                      // Of course reverse path calculation is still pretty easy given naive routing
 -                                      // algorithms, but this fixes the most-obvious case.
 -                                      let mut next_bytes = [0; 32];
 -                                      chacha_stream.read_exact(&mut next_bytes).unwrap();
 -                                      assert_ne!(next_bytes[..], [0; 32][..]);
 -                                      chacha_stream.read_exact(&mut next_bytes).unwrap();
 -                                      assert_ne!(next_bytes[..], [0; 32][..]);
 -                              }
 -
 -                              // OUR PAYMENT!
 -                              // final_expiry_too_soon
 -                              // We have to have some headroom to broadcast on chain if we have the preimage, so make sure we have at least
 -                              // HTLC_FAIL_BACK_BUFFER blocks to go.
 -                              // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward
 -                              // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rational).
 -                              if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
 -                                      return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
 -                              }
 -                              // final_incorrect_htlc_amount
 -                              if next_hop_data.amt_to_forward > msg.amount_msat {
 -                                      return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
 -                              }
 -                              // final_incorrect_cltv_expiry
 -                              if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
 -                                      return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
 -                              }
 +                      #[cfg(test)]
 +                      {
 +                      {
 +                              // In tests, make sure that the initial onion packet data is, at least, non-0.
 +                              // We could do some fancy randomness test here, but, ehh, whatever.
 +                              // This checks for the issue where you can calculate the path length given the
 +                              // onion data as all the path entries that the originator sent will be here
 +                              // as-is (and were originally 0s).
 +                              // Of course reverse path calculation is still pretty easy given naive routing
 +                              // algorithms, but this fixes the most-obvious case.
 +                              let mut next_bytes = [0; 32];
 +                              chacha_stream.read_exact(&mut next_bytes).unwrap();
 +                              assert_ne!(next_bytes[..], [0; 32][..]);
 +                              chacha_stream.read_exact(&mut next_bytes).unwrap();
 +                              assert_ne!(next_bytes[..], [0; 32][..]);
 +                      }
  
 -                              let payment_data = match next_hop_data.format {
 -                                      msgs::OnionHopDataFormat::Legacy { .. } => None,
 -                                      msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
 -                                      msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
 -                              };
 +                      // OUR PAYMENT!
 +                      // final_expiry_too_soon
 +                      // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
 +                      // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
 +                      // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
 +                      // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
 +                      // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
 +                      if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
 +                              return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
 +                      }
 +                      // final_incorrect_htlc_amount
 +                      if next_hop_data.amt_to_forward > msg.amount_msat {
 +                              return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
 +                      }
 +                      // final_incorrect_cltv_expiry
 +                      if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
 +                              return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
 +                      }
  
 -                              if payment_data.is_none() {
 -                                      return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
 -                              }
 +                      let routing = match next_hop_data.format {
 +                              msgs::OnionHopDataFormat::Legacy { .. } => return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]),
 +                              msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
 +                              msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => {
 +                                      if payment_data.is_some() && keysend_preimage.is_some() {
 +                                              return_err!("We don't support MPP keysend payments", 0x4000|22, &[0;0]);
 +                                      } else if let Some(data) = payment_data {
 +                                              PendingHTLCRouting::Receive {
 +                                                      payment_data: data,
 +                                                      incoming_cltv_expiry: msg.cltv_expiry,
 +                                              }
 +                                      } else if let Some(payment_preimage) = keysend_preimage {
 +                                              // We need to check that the sender knows the keysend preimage before processing this
 +                                              // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
 +                                              // could discover the final destination of X, by probing the adjacent nodes on the route
 +                                              // with a keysend payment of identical payment hash to X and observing the processing
 +                                              // time discrepancies due to a hash collision with X.
 +                                              let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
 +                                              if hashed_preimage != msg.payment_hash {
 +                                                      return_err!("Payment preimage didn't match payment hash", 0x4000|22, &[0;0]);
 +                                              }
  
 -                              // Note that we could obviously respond immediately with an update_fulfill_htlc
 -                              // message, however that would leak that we are the recipient of this payment, so
 -                              // instead we stay symmetric with the forwarding case, only responding (after a
 -                              // delay) once they've send us a commitment_signed!
 +                                              PendingHTLCRouting::ReceiveKeysend {
 +                                                      payment_preimage,
 +                                                      incoming_cltv_expiry: msg.cltv_expiry,
 +                                              }
 +                                      } else {
 +                                              return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
 +                                      }
 +                              },
 +                      };
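
The keysend arm above accepts the HTLC only if SHA-256 of the sender-supplied preimage equals the payment hash, exactly to block the probing attack described in the comment. The check in isolation, using the bitcoin_hashes types this file already imports (the `preimage_matches` helper is hypothetical):

    use bitcoin::hashes::sha256::Hash as Sha256;
    use bitcoin::hashes::Hash;

    // True iff `preimage` actually hashes to `payment_hash`; a mismatch is
    // failed back with error code 0x4000|22 above.
    fn preimage_matches(preimage: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
        Sha256::hash(&preimage[..]).into_inner() == *payment_hash
    }
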
  
 -                              PendingHTLCStatus::Forward(PendingHTLCInfo {
 -                                      routing: PendingHTLCRouting::Receive {
 -                                              payment_data: payment_data.unwrap(),
 -                                              incoming_cltv_expiry: msg.cltv_expiry,
 -                                      },
 -                                      payment_hash: msg.payment_hash.clone(),
 -                                      incoming_shared_secret: shared_secret,
 -                                      amt_to_forward: next_hop_data.amt_to_forward,
 -                                      outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
 -                              })
 -                      } else {
 -                              let mut new_packet_data = [0; 20*65];
 -                              let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
 -                              #[cfg(debug_assertions)]
 -                              {
 -                                      // Check two things:
 -                                      // a) that the behavior of our stream here will return Ok(0) even if the TLV
 -                                      //    read above emptied out our buffer and the unwrap() wont needlessly panic
 -                                      // b) that we didn't somehow magically end up with extra data.
 -                                      let mut t = [0; 1];
 -                                      debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
 -                              }
 -                              // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
 -                              // fill the onion hop data we'll forward to our next-hop peer.
 -                              chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
 +                      // Note that we could obviously respond immediately with an update_fulfill_htlc
 +                      // message, however that would leak that we are the recipient of this payment, so
 +                      // instead we stay symmetric with the forwarding case, only responding (after a
 +                      // delay) once they've sent us a commitment_signed!
 +
 +                      PendingHTLCStatus::Forward(PendingHTLCInfo {
 +                              routing,
 +                              payment_hash: msg.payment_hash.clone(),
 +                              incoming_shared_secret: shared_secret,
 +                              amt_to_forward: next_hop_data.amt_to_forward,
 +                              outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
 +                      })
 +              } else {
 +                      let mut new_packet_data = [0; 20*65];
 +                      let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
 +                      #[cfg(debug_assertions)]
 +                      {
 +                              // Check two things:
 +                              // a) that the behavior of our stream here will return Ok(0) even if the TLV
 +                              //    read above emptied out our buffer and the unwrap() won't needlessly panic
 +                              // b) that we didn't somehow magically end up with extra data.
 +                              let mut t = [0; 1];
 +                              debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
 +                      }
 +                      // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
 +                      // fill the onion hop data we'll forward to our next-hop peer.
 +                      chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
  
 -                              let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
 +                      let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
  
 -                              let blinding_factor = {
 -                                      let mut sha = Sha256::engine();
 -                                      sha.input(&new_pubkey.serialize()[..]);
 -                                      sha.input(&shared_secret);
 -                                      Sha256::from_engine(sha).into_inner()
 -                              };
 -
 -                              let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
 -                                      Err(e)
 -                              } else { Ok(new_pubkey) };
 +                      let blinding_factor = {
 +                              let mut sha = Sha256::engine();
 +                              sha.input(&new_pubkey.serialize()[..]);
 +                              sha.input(&shared_secret);
 +                              Sha256::from_engine(sha).into_inner()
 +                      };
  
 -                              let outgoing_packet = msgs::OnionPacket {
 -                                      version: 0,
 -                                      public_key,
 -                                      hop_data: new_packet_data,
 -                                      hmac: next_hop_hmac.clone(),
 -                              };
 +                      let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
 +                              Err(e)
 +                      } else { Ok(new_pubkey) };
  
 -                              let short_channel_id = match next_hop_data.format {
 -                                      msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
 -                                      msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
 -                                      msgs::OnionHopDataFormat::FinalNode { .. } => {
 -                                              return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
 -                                      },
 -                              };
 +                      let outgoing_packet = msgs::OnionPacket {
 +                              version: 0,
 +                              public_key,
 +                              hop_data: new_packet_data,
 +                              hmac: next_hop_hmac.clone(),
 +                      };
  
 -                              PendingHTLCStatus::Forward(PendingHTLCInfo {
 -                                      routing: PendingHTLCRouting::Forward {
 -                                              onion_packet: outgoing_packet,
 -                                              short_channel_id,
 -                                      },
 -                                      payment_hash: msg.payment_hash.clone(),
 -                                      incoming_shared_secret: shared_secret,
 -                                      amt_to_forward: next_hop_data.amt_to_forward,
 -                                      outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
 -                              })
 +                      let short_channel_id = match next_hop_data.format {
 +                              msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
 +                              msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
 +                              msgs::OnionHopDataFormat::FinalNode { .. } => {
 +                                      return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
 +                              },
                        };
  
 +                      PendingHTLCStatus::Forward(PendingHTLCInfo {
 +                              routing: PendingHTLCRouting::Forward {
 +                                      onion_packet: outgoing_packet,
 +                                      short_channel_id,
 +                              },
 +                              payment_hash: msg.payment_hash.clone(),
 +                              incoming_shared_secret: shared_secret,
 +                              amt_to_forward: next_hop_data.amt_to_forward,
 +                              outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
 +                      })
 +              };
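
In the forward branch above, each hop re-blinds the ephemeral onion key before handing the packet on: the blinding factor is SHA256(ephemeral_pubkey || shared_secret), and the key is then tweak-multiplied by it (the `mul_assign` call). The factor computation in isolation, as a sketch with a hypothetical free function:

    use bitcoin::hashes::sha256::Hash as Sha256;
    use bitcoin::hashes::{Hash, HashEngine};

    // BOLT 4 next-hop blinding factor: SHA256(ephemeral_pubkey || shared_secret).
    fn blinding_factor(ephemeral_pubkey_ser: &[u8; 33], shared_secret: &[u8; 32]) -> [u8; 32] {
        let mut sha = Sha256::engine();
        sha.input(&ephemeral_pubkey_ser[..]);
        sha.input(&shared_secret[..]);
        Sha256::from_engine(sha).into_inner()
    }
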
 +
                channel_state = Some(self.channel_state.lock().unwrap());
                if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
                        // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
                                let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
 -                              let forwarding_id = match id_option {
 -                                      None => { // unknown_next_peer
 -                                              return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
 -                                      },
 -                                      Some(id) => id.clone(),
 -                              };
                                if let Some((err, code, chan_update)) = loop {
 +                                      let forwarding_id = match id_option {
 +                                              None => { // unknown_next_peer
 +                                                      break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 +                                              },
 +                                              Some(id) => id.clone(),
 +                                      };
 +
                                        let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
  
 +                                      if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
 +                                              // Note that the behavior here should be identical to the above block - we
 +                                              // should NOT reveal the existence or non-existence of a private channel if
 +                                              // we don't allow forwards outbound over them.
 +                                              break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 +                                      }
 +
                                        // Note that we could technically not return an error yet here and just hope
                                        // that the connection is reestablished or monitor updated by the time we get
                                        // around to doing the actual forward, but better to fail early if we can and
                                        // hopefully an attacker trying to path-trace payments cannot make this occur
                                        // on a small/per-node/per-channel scale.
                                        if !chan.is_live() { // channel_disabled
 -                                              break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
 +                                              break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
 -                                              break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
 +                                              break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
 -                                      let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) });
 +                                      let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64)
 +                                              .and_then(|prop_fee| { (prop_fee / 1000000)
 +                                              .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) });
                                        if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
 -                                              break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
 +                                              break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
 -                                              break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
 +                                              break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        let cur_height = self.best_block.read().unwrap().height() + 1;
                                        // Theoretically, the channel counterparty shouldn't send us an HTLC expiring now, but we want to be robust wrt counterparty
                                        // packet sanitization (see HTLC_FAIL_BACK_BUFFER rationale)
                                        if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
 -                                              break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
 +                                              break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
                                                break Some(("CLTV expiry is too far in the future", 21, None));
                                        }
 -                                      // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
 -                                      // But, to be safe against policy reception, we use a longuer delay.
 +                                      // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
 +                                      // But, to be safe against policy reception, we use a longer delay.
                                        if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
 -                                              break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
 +                                              break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
  
                                        break None;
                (pending_forward_info, channel_state.unwrap())
        }
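
The fee_insufficient check inside the loop above encodes the standard forwarding fee formula, expected_fee = base_msat + amt_to_forward * fee_proportional_millionths / 1_000_000, with checked arithmetic so an overflow reads as an insufficient fee. The same computation in isolation (hypothetical helper):

    // Expected forwarding fee in msat; None signals u64 overflow, which the
    // forwarding code above treats the same as an insufficient fee.
    fn expected_fee_msat(amt_to_forward: u64, prop_millionths: u32, base_msat: u32) -> Option<u64> {
        amt_to_forward.checked_mul(prop_millionths as u64)
            .map(|prop_fee| prop_fee / 1_000_000)
            .and_then(|prop_fee| prop_fee.checked_add(base_msat as u64))
    }
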
  
 -      /// only fails if the channel does not yet have an assigned short_id
 +      /// Gets the current channel_update for the given channel. This first checks if the channel is
 +      /// public, and thus should be called whenever the result is going to be passed out in a
 +      /// [`MessageSendEvent::BroadcastChannelUpdate`] event.
 +      ///
        /// May be called with channel_state already locked!
 -      fn get_channel_update(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
 +      fn get_channel_update_for_broadcast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
 +              if !chan.should_announce() {
 +                      return Err(LightningError {
 +                              err: "Cannot broadcast a channel_update for a private channel".to_owned(),
 +                              action: msgs::ErrorAction::IgnoreError
 +                      });
 +              }
 +              log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
 +              self.get_channel_update_for_unicast(chan)
 +      }
 +
 +      /// Gets the current channel_update for the given channel. This does not check if the channel
 +      /// is public (only returning an Err if the channel does not yet have an assigned short_id),
 +      /// and thus MUST NOT be called unless the recipient of the resulting message has already
 +      /// provided evidence that they know about the existence of the channel.
 +      /// May be called with channel_state already locked!
 +      fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
 +              log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
                let short_channel_id = match chan.get_short_channel_id() {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
                        cltv_expiry_delta: chan.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
                        htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
 -                      fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
 +                      fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
                        fee_proportional_millionths: chan.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
                };
        }
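
The broadcast/unicast split above encodes a privacy rule: a private channel's channel_update may be sent directly to a peer that has already demonstrated knowledge of the channel, but must never be gossiped. A minimal sketch of the same gate, with hypothetical types:

    enum UpdateAudience { Broadcast, Unicast }

    // Mirrors the check above: only announced channels may have their
    // updates broadcast; unicast is fine once the peer knows the channel.
    fn may_generate_update(audience: UpdateAudience, should_announce: bool) -> bool {
        match audience {
            UpdateAudience::Broadcast => should_announce,
            UpdateAudience::Unicast => true,
        }
    }
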
  
        // Only public for testing, this should otherwise never be called directly
 -      pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
 +      pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
                log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
                let prng_seed = self.keys_manager.get_secure_random_bytes();
                let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
  
                let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
                        .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
 -              let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?;
 +              let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
                if onion_utils::route_size_insane(&onion_payloads) {
                        return Err(APIError::RouteError{err: "Route size too large considering onion data"});
                }
        /// bit set (either as required or as available). If multiple paths are present in the Route,
        /// we assume the invoice had the basic_mpp feature set.
        pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> {
 +              self.send_payment_internal(route, payment_hash, payment_secret, None)
 +      }
 +
 +      fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>) -> Result<(), PaymentSendFailure> {
                if route.paths.len() < 1 {
                        return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
                }
                let cur_height = self.best_block.read().unwrap().height() + 1;
                let mut results = Vec::new();
                for path in route.paths.iter() {
 -                      results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
 +                      results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, &keysend_preimage));
                }
                let mut has_ok = false;
                let mut has_err = false;
                }
        }
  
 +      /// Send a spontaneous payment, i.e. a payment that does not require the recipient to have
 +      /// generated an invoice. Optionally, you may specify the preimage. If you do, it must be a
 +      /// cryptographically secure random value that no intermediate node would be able to guess --
 +      /// otherwise, an intermediate node may claim the payment and it will never reach the recipient.
 +      ///
 +      /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
 +      /// [`send_payment`] for more information about the risks of duplicate preimage usage.
 +      ///
 +      /// [`send_payment`]: Self::send_payment
 +      pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<PaymentHash, PaymentSendFailure> {
 +              let preimage = match payment_preimage {
 +                      Some(p) => p,
 +                      None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
 +              };
 +              let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
 +              match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) {
 +                      Ok(()) => Ok(payment_hash),
 +                      Err(e) => Err(e)
 +              }
 +      }
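
A hedged usage sketch for the new API, letting the ChannelManager draw a fresh random preimage (`channel_manager` and `route` are assumed to exist; error handling elided):

    // Passing None lets LDK pick a secure random preimage; the returned
    // hash is SHA256(preimage) and identifies the payment in later events.
    match channel_manager.send_spontaneous_payment(&route, None) {
        Ok(payment_hash) => println!("keysend in flight: {:?}", payment_hash),
        Err(e) => eprintln!("keysend failed: {:?}", e),
    }
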
 +
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
        fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>
                        if let Some(msg) = chan.get_signed_channel_announcement(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone()) {
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                        msg,
 -                                      update_msg: match self.get_channel_update(chan) {
 +                                      update_msg: match self.get_channel_update_for_broadcast(chan) {
                                                Ok(msg) => msg,
                                                Err(_) => continue,
                                        },
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in send_htlc() were not met");
                                                                                        }
 -                                                                                      let chan_update = self.get_channel_update(chan.get()).unwrap();
 +                                                                                      let chan_update = self.get_channel_update_for_unicast(chan.get()).unwrap();
                                                                                        failed_forwards.push((htlc_source, payment_hash,
                                                                                                HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() }
                                                                                        ));
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
 -                                                                                      Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
 +                                                                                      Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                                                        };
                                        for forward_info in pending_forwards.drain(..) {
                                                match forward_info {
                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
 -                                                                      routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
 -                                                                      incoming_shared_secret, payment_hash, amt_to_forward, .. },
 +                                                                      routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
                                                                        prev_funding_outpoint } => {
 +                                                              let (cltv_expiry, onion_payload) = match routing {
 +                                                                      PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry } =>
 +                                                                              (incoming_cltv_expiry, OnionPayload::Invoice(payment_data)),
 +                                                                      PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
 +                                                                              (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage)),
 +                                                                      _ => {
  +                                                                              panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive or ReceiveKeysend");
 +                                                                      }
 +                                                              };
                                                                let claimable_htlc = ClaimableHTLC {
                                                                        prev_hop: HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                        },
                                                                        value: amt_to_forward,
 -                                                                      payment_data: payment_data.clone(),
 -                                                                      cltv_expiry: incoming_cltv_expiry,
 +                                                                      cltv_expiry,
 +                                                                      onion_payload,
                                                                };
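
For readers following along: the `OnionPayload` value constructed here is the new discriminator between invoice-based and spontaneous (keysend) receives. A minimal sketch of its shape, inferred from the variants used above and assuming LDK's `msgs::FinalOnionHopData` and `PaymentPreimage` types:

    /// Sketch only: the two terminal-HTLC payloads the match above produces.
    enum OnionPayload {
        /// Payment for an invoice we issued; FinalOnionHopData carries the
        /// payment_secret and total_msat used for the MPP validation below.
        Invoice(msgs::FinalOnionHopData),
        /// Spontaneous (keysend) payment; the sender includes the preimage,
        /// so we can claim without having issued an invoice.
        Spontaneous(PaymentPreimage),
    }
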
  
                                                                macro_rules! fail_htlc {
                                                                let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
                                                                match payment_secrets.entry(payment_hash) {
                                                                        hash_map::Entry::Vacant(_) => {
 -                                                                              log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
 -                                                                              fail_htlc!(claimable_htlc);
 +                                                                              match claimable_htlc.onion_payload {
 +                                                                                      OnionPayload::Invoice(_) => {
 +                                                                                              log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
 +                                                                                              fail_htlc!(claimable_htlc);
 +                                                                                      },
 +                                                                                      OnionPayload::Spontaneous(preimage) => {
 +                                                                                              match channel_state.claimable_htlcs.entry(payment_hash) {
 +                                                                                                      hash_map::Entry::Vacant(e) => {
 +                                                                                                              e.insert(vec![claimable_htlc]);
 +                                                                                                              new_events.push(events::Event::PaymentReceived {
 +                                                                                                                      payment_hash,
 +                                                                                                                      amt: amt_to_forward,
 +                                                                                                                      purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
 +                                                                                                              });
 +                                                                                                      },
 +                                                                                                      hash_map::Entry::Occupied(_) => {
  +                                                                                                      log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicate payment hash", log_bytes!(payment_hash.0));
 +                                                                                                              fail_htlc!(claimable_htlc);
 +                                                                                                      }
 +                                                                                              }
 +                                                                                      }
 +                                                                              }
                                                                        },
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
 +                                                                              let payment_data =
 +                                                                                      if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload {
 +                                                                                              data.clone()
 +                                                                                      } else {
 +                                                                                              log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
 +                                                                                              fail_htlc!(claimable_htlc);
 +                                                                                              continue
 +                                                                                      };
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
                                                                                        fail_htlc!(claimable_htlc);
                                                                                        let mut total_value = 0;
                                                                                        let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
                                                                                                .or_insert(Vec::new());
 +                                                                                      if htlcs.len() == 1 {
 +                                                                                              if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
 +                                                                                                      log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
 +                                                                                                      fail_htlc!(claimable_htlc);
 +                                                                                                      continue
 +                                                                                              }
 +                                                                                      }
                                                                                        htlcs.push(claimable_htlc);
                                                                                        for htlc in htlcs.iter() {
                                                                                                total_value += htlc.value;
 -                                                                                              if htlc.payment_data.total_msat != payment_data.total_msat {
 -                                                                                                      log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
 -                                                                                                              log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat);
 -                                                                                                      total_value = msgs::MAX_VALUE_MSAT;
 +                                                                                              match &htlc.onion_payload {
 +                                                                                                      OnionPayload::Invoice(htlc_payment_data) => {
 +                                                                                                              if htlc_payment_data.total_msat != payment_data.total_msat {
 +                                                                                                                      log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
 +                                                                                                                                                               log_bytes!(payment_hash.0), payment_data.total_msat, htlc_payment_data.total_msat);
 +                                                                                                                      total_value = msgs::MAX_VALUE_MSAT;
 +                                                                                                              }
 +                                                                                                              if total_value >= msgs::MAX_VALUE_MSAT { break; }
 +                                                                                                      },
 +                                                                                                      _ => unreachable!(),
                                                                                                }
 -                                                                                              if total_value >= msgs::MAX_VALUE_MSAT { break; }
                                                                                        }
                                                                                        if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat {
                                                                                                log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
                                                                                        } else if total_value == payment_data.total_msat {
                                                                                                new_events.push(events::Event::PaymentReceived {
                                                                                                        payment_hash,
 -                                                                                                      payment_preimage: inbound_payment.get().payment_preimage,
 -                                                                                                      payment_secret: payment_data.payment_secret,
 +                                                                                                      purpose: events::PaymentPurpose::InvoicePayment {
 +                                                                                                              payment_preimage: inbound_payment.get().payment_preimage,
 +                                                                                                              payment_secret: payment_data.payment_secret,
 +                                                                                                              user_payment_id: inbound_payment.get().user_payment_id,
 +                                                                                                      },
                                                                                                        amt: total_value,
 -                                                                                                      user_payment_id: inbound_payment.get().user_payment_id,
                                                                                                });
                                                                                                // Only ever generate at most one PaymentReceived
                                                                                                // per registered payment_hash, even if it isn't
                                                                        },
                                                                };
                                                        },
 -                                                      HTLCForwardInfo::AddHTLC { .. } => {
 -                                                              panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
 -                                                      },
                                                        HTLCForwardInfo::FailHTLC { .. } => {
                                                                panic!("Got pending fail of our own HTLC");
                                                        }
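
Taken together, the vacant/occupied arms above enforce one invariant: a payment_hash entry in `claimable_htlcs` holds either invoice HTLC parts or exactly one keysend HTLC, never a mixture. A self-contained model of that acceptance rule (hypothetical names, not LDK API):

    use std::collections::HashMap;

    enum Payload { Invoice, Spontaneous }

    /// Mirrors the acceptance logic above: a keysend HTLC never joins an
    /// existing entry, and an invoice HTLC never joins a lone keysend HTLC.
    fn may_accept(claimable: &HashMap<[u8; 32], Vec<Payload>>, hash: &[u8; 32], incoming: &Payload) -> bool {
        match (claimable.get(hash), incoming) {
            (None, _) => true,                        // first HTLC for this hash
            (Some(_), Payload::Spontaneous) => false, // duplicate keysend: fail it
            (Some(existing), Payload::Invoice) =>
                !matches!(existing.as_slice(), [Payload::Spontaneous]),
        }
    }
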
                                        ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
                                        ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
                                        ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
 -                                              if let Ok(update) = self.get_channel_update(&chan) {
 +                                              if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
                                                chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
                                        },
                                        ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
 -                                              if let Ok(update) = self.get_channel_update(&chan) {
 +                                              if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
                                        let (failure_code, onion_failure_data) =
                                                match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
                                                        hash_map::Entry::Occupied(chan_entry) => {
 -                                                              if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
 +                                                              if let Ok(upd) = self.get_channel_update_for_unicast(&chan_entry.get()) {
                                                                        (0x1000|7, upd.encode_with_len())
                                                                } else {
                                                                        (0x4000|10, Vec::new())
                };
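
The pair of codes selected here follows BOLT 4's failure-code flag scheme; a short sketch of the composition (values as used above, names per the spec):

    /// BOLT 4 failure-code flag bits.
    const UPDATE: u16 = 0x1000; // failure message carries a channel_update
    const PERM: u16 = 0x4000;   // permanent failure, don't retry through this hop

    /// 0x1000|7: temporary_channel_failure, with our latest channel_update attached.
    const TEMPORARY_CHANNEL_FAILURE: u16 = UPDATE | 7;
    /// 0x4000|10: unknown_next_peer, used when no channel_update is available.
    const UNKNOWN_NEXT_PEER: u16 = PERM | 10;
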
  
                if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
-                       let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-                               Ok((msgs, monitor_option)) => {
-                                       if let Some(monitor_update) = monitor_option {
+                               Ok(msgs_monitor_option) => {
+                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
                                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       if was_frozen_for_monitor {
-                                                               assert!(msgs.is_none());
-                                                       } else {
-                                                               return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
-                                                       }
+                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
+                                                               "Failed to update channel monitor with preimage {:?}: {:?}",
+                                                               payment_preimage, e);
+                                                       return Err(Some((
+                                                               chan.get().get_counterparty_node_id(),
+                                                               handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+                                                       )));
+                                               }
+                                               if let Some((msg, commitment_signed)) = msgs {
+                                                       log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+                                                               log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                               node_id: chan.get().get_counterparty_node_id(),
+                                                               updates: msgs::CommitmentUpdate {
+                                                                       update_add_htlcs: Vec::new(),
+                                                                       update_fulfill_htlcs: vec![msg],
+                                                                       update_fail_htlcs: Vec::new(),
+                                                                       update_fail_malformed_htlcs: Vec::new(),
+                                                                       update_fee: None,
+                                                                       commitment_signed,
+                                                               }
+                                                       });
                                                }
-                                       }
-                                       if let Some((msg, commitment_signed)) = msgs {
-                                               log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
-                                                       log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: chan.get().get_counterparty_node_id(),
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: Vec::new(),
-                                                               update_fulfill_htlcs: vec![msg],
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: None,
-                                                               commitment_signed,
-                                                       }
-                                               });
                                        }
                                        return Ok(())
                                },
-                               Err(e) => {
-                                       // TODO: Do something with e?
-                                       // This should only occur if we are claiming an HTLC at the same time as the
-                                       // HTLC is being failed (eg because a block is being connected and this caused
-                                       // an HTLC to time out). This should, of course, only occur if the user is the
-                                       // one doing the claiming (as it being a part of a peer claim would imply we're
-                                       // about to lose funds) and only if the lock in claim_funds was dropped as a
-                                       // previous HTLC was failed (thus not for an MPP payment).
-                                       debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
-                                       return Err(None)
+                               Err((e, monitor_update)) => {
+                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
+                                                       "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+                                                       payment_preimage, e);
+                                       }
+                                       let counterparty_node_id = chan.get().get_counterparty_node_id();
+                                       let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+                                       if drop {
+                                               chan.remove_entry();
+                                       }
+                                       return Err(Some((counterparty_node_id, res)));
                                },
                        }
                } else { unreachable!(); }
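
The rewritten claim path above hinges on the new `UpdateFulfillCommitFetch` return type of `get_update_fulfill_htlc_and_commit`, which is what lets a second claim of the same HTLC surface as a no-op rather than an error that force-closes the backwards channel. A sketch of the shape this code assumes (field layout inferred from the match arms, not verified against the full patch):

    /// Sketch of the fetch result destructured above.
    enum UpdateFulfillCommitFetch {
        /// First-time claim: always persist the monitor update; send the
        /// update_fulfill_htlc + commitment_signed only if one was generated
        /// (it is held back while a prior monitor update is still pending).
        NewClaim {
            monitor_update: ChannelMonitorUpdate,
            msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
        },
        /// The preimage was already known for this HTLC: nothing to send,
        /// and crucially no error to propagate backwards.
        DuplicateClaim {},
    }
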
        pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
 -              let (mut pending_failures, chan_restoration_res) = {
 +              let chan_restoration_res;
 +              let mut pending_failures = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
                        }
  
                        let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
 -                      (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked))
 +                      let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
 +                              // We only send a channel_update in the case where we are just now sending a
 +                              // funding_locked and the channel is in a usable state. Further, we rely on the
 +                              // normal announcement_signatures process to send a channel_update for public
 +                              // channels, only generating a unicast channel_update if this is a private channel.
 +                              Some(events::MessageSendEvent::SendChannelUpdate {
 +                                      node_id: channel.get().get_counterparty_node_id(),
 +                                      msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
 +                              })
 +                      } else { None };
 +                      chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
 +                      if let Some(upd) = channel_update {
 +                              channel_state.pending_msg_events.push(upd);
 +                      }
 +                      pending_failures
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                for failure in pending_failures.drain(..) {
                                                node_id: counterparty_node_id.clone(),
                                                msg: announcement_sigs,
                                        });
 +                              } else if chan.get().is_usable() {
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
 +                                              node_id: counterparty_node_id.clone(),
 +                                              msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
 +                                      });
                                }
                                Ok(())
                        },
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                if let Some(chan) = chan_option {
 -                      if let Ok(update) = self.get_channel_update(&chan) {
 +                      if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut channel_state = self.channel_state.lock().unwrap();
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
                        self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
                }
                if let Some(chan) = chan_option {
 -                      if let Ok(update) = self.get_channel_update(&chan) {
 +                      if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut channel_state = self.channel_state.lock().unwrap();
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
                                        // want to reject the new HTLC and fail it backwards instead of forwarding.
                                        match pending_forward_info {
                                                PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
 -                                                      let reason = if let Ok(upd) = self.get_channel_update(chan) {
 +                                                      let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
                                                                onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
                                                                        let mut res = Vec::with_capacity(8 + 128);
                                                                        // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
                                        match channel_state.forward_htlcs.entry(match forward_info.routing {
                                                        PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
                                                        PendingHTLCRouting::Receive { .. } => 0,
 +                                                      PendingHTLCRouting::ReceiveKeysend { .. } => 0,
                                        }) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
  
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                        msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone(), msg), channel_state, chan),
 -                                      update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
 +                                      // Note that announcement_signatures fails if the channel cannot be announced,
 +                                      // so get_channel_update_for_broadcast will never fail by the time we get here.
 +                                      update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
                                });
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                                        }
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                }
 -                              try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
 +                              let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
 +                              let msg_from_node_one = msg.contents.flags & 1 == 0;
 +                              if were_node_one == msg_from_node_one {
 +                                      return Ok(NotifyOption::SkipPersist);
 +                              } else {
 +                                      try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
 +                              }
                        },
                        hash_map::Entry::Vacant(_) => unreachable!()
                }
        }
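
The persist-skipping check above reduces to two pure predicates over the message flags and the two serialized node pubkeys; a self-contained sketch (hypothetical helper, mirroring the logic):

    /// BOLT 7: the low bit of channel_update `flags` selects the signer's
    /// direction; node_one is the peer with the smaller serialized pubkey.
    fn update_describes_our_direction(our_pk: &[u8; 33], their_pk: &[u8; 33], msg_flags: u8) -> bool {
        let we_are_node_one = our_pk[..] < their_pk[..];
        let msg_from_node_one = msg_flags & 1 == 0;
        // If it describes our own direction, there is nothing new to apply.
        we_are_node_one == msg_from_node_one
    }
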
  
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
 -              let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = {
 +              let chan_restoration_res;
 +              let (htlcs_failed_forward, need_lnd_workaround) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
  
                                        // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                        let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
                                                try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
 +                                      let mut channel_update = None;
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: counterparty_node_id.clone(),
                                                        msg,
                                                });
 +                                      } else if chan.get().is_usable() {
 +                                              // If the channel is in a usable state (ie the channel is not being shut
 +                                              // down), send a unicast channel_update to our counterparty to make sure
 +                                              // they have the latest channel parameters.
 +                                              channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
 +                                                      node_id: chan.get().get_counterparty_node_id(),
 +                                                      msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
 +                                              });
                                        }
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
 -                                      (htlcs_failed_forward, need_lnd_workaround,
 -                                              handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
 +                                      chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked);
 +                                      if let Some(upd) = channel_update {
 +                                              channel_state.pending_msg_events.push(upd);
 +                                      }
 +                                      (htlcs_failed_forward, need_lnd_workaround)
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                                                        short_to_id.remove(&short_id);
                                                }
                                                failed_channels.push(chan.force_shutdown(false));
 -                                              if let Ok(update) = self.get_channel_update(&chan) {
 +                                              if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
        /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This
        /// method may return an Err if another payment with the same payment_hash is still pending.
        ///
 -      /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to
  +      /// `user_payment_id` will be provided back via [`PaymentPurpose::InvoicePayment::user_payment_id`] in [`PaymentReceived`] events to
        /// allow tracking of which events correspond with which calls to this and
        /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply
        /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events
        ///
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`PaymentReceived`]: events::Event::PaymentReceived
 -      /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id
 +      /// [`PaymentPurpose::InvoicePayment::user_payment_id`]: events::PaymentPurpose::InvoicePayment::user_payment_id
        pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
                self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
        }
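
A hedged usage sketch for the method above, given the signature shown (assuming a `channel_manager` in scope and a `payment_hash` learned out-of-band; the argument values are illustrative):

    // Register a hash whose preimage we do not yet know; the resulting
    // payment can only be claimed once the preimage is learned out-of-band.
    let payment_secret = channel_manager
        .create_inbound_payment_for_hash(
            payment_hash, // PaymentHash agreed with the payer
            Some(10_000), // min_value_msat: fail payments below 10k msat
            3600,         // invoice_expiry_delta_secs: accept for one hour
            42,           // user_payment_id: echoed back in PaymentPurpose::InvoicePayment
        )
        .expect("a payment with this hash is already pending");
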
@@@ -4148,7 -3963,7 +4151,7 @@@ wher
                                let res = f(channel);
                                if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
 -                                              let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
 +                                              let chan_update = self.get_channel_update_for_unicast(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
                                                timed_out_htlcs.push((source, payment_hash,  HTLCFailReason::Reason {
                                                        failure_code: 0x1000 | 14, // expiry_too_soon, or at least it is now
                                                        data: chan_update,
                                                                node_id: channel.get_counterparty_node_id(),
                                                                msg: announcement_sigs,
                                                        });
 +                                              } else if channel.is_usable() {
 +                                                      log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
 +                                                      pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
 +                                                              node_id: channel.get_counterparty_node_id(),
 +                                                              msg: self.get_channel_update_for_unicast(channel).unwrap(),
 +                                                      });
                                                } else {
                                                        log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id()));
                                                }
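
Condensed, the new branch implements a three-way policy for what accompanies a funding_locked; a sketch with hypothetical names:

    enum FundingLockedFollowUp {
        /// Announceable channel: the announcement_signatures flow will lead
        /// to a broadcast channel_update, so send nothing extra here.
        AnnouncementSignatures,
        /// Private but usable channel: unicast a channel_update so the peer
        /// has our current fees and CLTV delta (eg for invoice route hints).
        UnicastChannelUpdate,
        /// Not usable yet: just the funding_locked.
        Nothing,
    }

    fn follow_up(have_announcement_sigs: bool, is_usable: bool) -> FundingLockedFollowUp {
        if have_announcement_sigs { FundingLockedFollowUp::AnnouncementSignatures }
        else if is_usable { FundingLockedFollowUp::UnicastChannelUpdate }
        else { FundingLockedFollowUp::Nothing }
    }
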
                                        // It looks like our counterparty went on-chain or funding transaction was
                                        // reorged out of the main chain. Close the channel.
                                        failed_channels.push(channel.force_shutdown(true));
 -                                      if let Ok(update) = self.get_channel_update(&channel) {
 +                                      if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
@@@ -4373,7 -4182,7 +4376,7 @@@ impl<Signer: Sign, M: Deref , T: Deref 
                                                        short_to_id.remove(&short_id);
                                                }
                                                failed_channels.push(chan.force_shutdown(true));
 -                                              if let Ok(update) = self.get_channel_update(&chan) {
 +                                              if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
                                        &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
 +                                      &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
  
                if msg.channel_id == [0; 32] {
                        for chan in self.list_channels() {
 -                              if chan.remote_network_id == *counterparty_node_id {
 +                              if chan.counterparty.node_id == *counterparty_node_id {
                                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
                                        let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
                                }
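
The `[0; 32]` comparison above applies BOLT 1's convention that an error with an all-zeros channel_id refers to every channel with the sending peer; a one-line sketch:

    /// BOLT 1: a zeroed channel_id in an error message means "all channels".
    fn error_applies_to_all_channels(channel_id: &[u8; 32]) -> bool {
        *channel_id == [0u8; 32]
    }
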
@@@ -4575,11 -4383,7 +4578,11 @@@ impl_writeable_tlv_based_enum!(PendingH
        (1, Receive) => {
                (0, payment_data, required),
                (2, incoming_cltv_expiry, required),
 -      }
 +      },
 +      (2, ReceiveKeysend) => {
 +              (0, payment_preimage, required),
 +              (2, incoming_cltv_expiry, required),
 +      },
  ;);
  
  impl_writeable_tlv_based!(PendingHTLCInfo, {
@@@ -4606,63 -4410,12 +4609,63 @@@ impl_writeable_tlv_based!(HTLCPreviousH
        (6, incoming_packet_shared_secret, required)
  });
  
 -impl_writeable_tlv_based!(ClaimableHTLC, {
 -      (0, prev_hop, required),
 -      (2, value, required),
 -      (4, payment_data, required),
 -      (6, cltv_expiry, required),
 -});
 +impl Writeable for ClaimableHTLC {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 +              let payment_data = match &self.onion_payload {
 +                      OnionPayload::Invoice(data) => Some(data.clone()),
 +                      _ => None,
 +              };
 +              let keysend_preimage = match self.onion_payload {
 +                      OnionPayload::Invoice(_) => None,
 +                      OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
 +              };
  +              write_tlv_fields!(writer, {
  +                      (0, self.prev_hop, required),
  +                      (2, self.value, required),
  +                      (4, payment_data, option),
  +                      (6, self.cltv_expiry, required),
  +                      (8, keysend_preimage, option),
  +              });
 +              Ok(())
 +      }
 +}
 +
 +impl Readable for ClaimableHTLC {
 +      fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
 +              let mut prev_hop = ::util::ser::OptionDeserWrapper(None);
 +              let mut value = 0;
 +              let mut payment_data: Option<msgs::FinalOnionHopData> = None;
 +              let mut cltv_expiry = 0;
 +              let mut keysend_preimage: Option<PaymentPreimage> = None;
  +              read_tlv_fields!(reader, {
  +                      (0, prev_hop, required),
  +                      (2, value, required),
  +                      (4, payment_data, option),
  +                      (6, cltv_expiry, required),
  +                      (8, keysend_preimage, option),
  +              });
 +              let onion_payload = match keysend_preimage {
 +                      Some(p) => {
 +                              if payment_data.is_some() {
 +                                      return Err(DecodeError::InvalidValue)
 +                              }
 +                              OnionPayload::Spontaneous(p)
 +                      },
 +                      None => {
 +                              if payment_data.is_none() {
 +                                      return Err(DecodeError::InvalidValue)
 +                              }
 +                              OnionPayload::Invoice(payment_data.unwrap())
 +                      },
 +              };
 +              Ok(Self {
 +                      prev_hop: prev_hop.0.unwrap(),
 +                      value,
 +                      onion_payload,
 +                      cltv_expiry,
 +              })
 +      }
 +}
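
The read path treats TLV types 4 and 8 as mutually exclusive, with exactly one required; a standalone model of that decode rule (hypothetical types):

    #[derive(Debug)]
    enum DecodeErr { InvalidValue }

    enum Payload { Invoice(Vec<u8>), Spontaneous([u8; 32]) }

    /// Exactly one of payment_data (TLV type 4) or keysend_preimage (type 8)
    /// must be present, matching the checks above.
    fn resolve_payload(payment_data: Option<Vec<u8>>, keysend_preimage: Option<[u8; 32]>)
        -> Result<Payload, DecodeErr>
    {
        match (payment_data, keysend_preimage) {
            (Some(_), Some(_)) | (None, None) => Err(DecodeErr::InvalidValue),
            (Some(d), None) => Ok(Payload::Invoice(d)),
            (None, Some(p)) => Ok(Payload::Spontaneous(p)),
        }
    }
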
  
  impl_writeable_tlv_based_enum!(HTLCSource,
        (0, OutboundRoute) => {
@@@ -5109,23 -4862,15 +5112,23 @@@ impl<'a, Signer: Sign, M: Deref, T: Der
  
  #[cfg(test)]
  mod tests {
 -      use ln::channelmanager::PersistenceNotifier;
 -      use std::sync::Arc;
 +      use bitcoin::hashes::Hash;
 +      use bitcoin::hashes::sha256::Hash as Sha256;
        use core::sync::atomic::{AtomicBool, Ordering};
 -      use std::thread;
        use core::time::Duration;
 +      use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 +      use ln::channelmanager::PersistenceNotifier;
 +      use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
 -      use ln::features::InitFeatures;
 +      use ln::msgs;
        use ln::msgs::ChannelMessageHandler;
 +      use routing::router::{get_keysend_route, get_route};
 +      use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 +      use util::test_utils;
 +      use std::sync::Arc;
 +      use std::thread;
  
 +      #[cfg(feature = "std")]
        #[test]
        fn test_wait_timeout() {
                let persistence_notifier = Arc::new(PersistenceNotifier::new());
                // At this point the channel info given by peers should still be the same.
                assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
                assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
 +
 +              // An earlier version of handle_channel_update didn't check the directionality of the
 +              // update message and would always update the local fee info, even if our peer was
 +              // (spuriously) forwarding us our own channel_update.
 +              let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
 +              let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
 +              let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
 +
 +              // First deliver each peers' own message, checking that the node doesn't need to be
 +              // persisted and that its channel info remains the same.
 +              nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
 +              nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
 +              assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +              assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +              assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
 +              assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
 +
 +              // Finally, deliver the other peers' message, ensuring each node needs to be persisted and
 +              // the channel info has updated.
 +              nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
 +              nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
 +              assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +              assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +              assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
 +              assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
 +      }
 +
 +      #[test]
 +      fn test_keysend_dup_hash_partial_mpp() {
 +              // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
 +              // expected.
 +              let chanmon_cfgs = create_chanmon_cfgs(2);
 +              let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +              let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +              let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +              create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 +              let logger = test_utils::TestLogger::new();
 +
 +              // First, send a partial MPP payment.
 +              let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
 +              let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
 +              let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
  +              // Use the utility function send_payment_along_path to send the payment with MPP data,
  +              // which indicates that more HTLCs are coming.
 +              let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
 +              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
 +
 +              // Next, send a keysend payment with the same payment_hash and make sure it fails.
 +              nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              let ev = events.drain(..).next().unwrap();
 +              let payment_event = SendEvent::from_event(ev);
 +              nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 +              check_added_monitors!(nodes[1], 0);
 +              commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 +              expect_pending_htlcs_forwardable!(nodes[1]);
 +              expect_pending_htlcs_forwardable!(nodes[1]);
 +              check_added_monitors!(nodes[1], 1);
 +              let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +              assert!(updates.update_add_htlcs.is_empty());
 +              assert!(updates.update_fulfill_htlcs.is_empty());
 +              assert_eq!(updates.update_fail_htlcs.len(), 1);
 +              assert!(updates.update_fail_malformed_htlcs.is_empty());
 +              assert!(updates.update_fee.is_none());
 +              nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
 +              commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
 +              expect_payment_failed!(nodes[0], our_payment_hash, true);
 +
 +              // Send the second half of the original MPP payment.
 +              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
 +
 +              // Claim the full MPP payment. Note that we can't use a test utility like
 +              // claim_funds_along_route because the ordering of the messages causes the second half of the
 +              // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
 +              // lightning messages manually.
 +              assert!(nodes[1].node.claim_funds(payment_preimage));
 +              check_added_monitors!(nodes[1], 2);
 +              let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +              nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
 +              nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
 +              check_added_monitors!(nodes[0], 1);
 +              let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +              nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
 +              check_added_monitors!(nodes[1], 1);
 +              let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +              nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs);
 +              check_added_monitors!(nodes[1], 1);
 +              let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +              nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
 +              nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
 +              check_added_monitors!(nodes[0], 1);
 +              let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +              nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
 +              let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +              check_added_monitors!(nodes[0], 1);
 +              nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
 +              check_added_monitors!(nodes[1], 1);
 +              nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
 +              check_added_monitors!(nodes[1], 1);
 +              let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +              nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
 +              check_added_monitors!(nodes[0], 1);
 +
 +              // There's an existing bug that generates a PaymentSent event for each MPP path, so handle that here.
 +              let events = nodes[0].node.get_and_clear_pending_events();
 +              match events[0] {
 +                      Event::PaymentSent { payment_preimage: ref preimage } => {
 +                              assert_eq!(payment_preimage, *preimage);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +              match events[1] {
 +                      Event::PaymentSent { payment_preimage: ref preimage } => {
 +                              assert_eq!(payment_preimage, *preimage);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +
 +      #[test]
 +      fn test_keysend_dup_payment_hash() {
 +              // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
 +              //      outbound regular payment fails as expected.
 +              // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
 +              //      fails as expected.
 +              let chanmon_cfgs = create_chanmon_cfgs(2);
 +              let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +              let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +              let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +              create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 +              let logger = test_utils::TestLogger::new();
 +
 +              // To start (1), send a regular payment but don't claim it.
 +              let expected_route = [&nodes[1]];
 +              let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
 +
 +              // Next, attempt a keysend payment and make sure it fails.
 +              let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
 +              nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              let ev = events.drain(..).next().unwrap();
 +              let payment_event = SendEvent::from_event(ev);
 +              nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 +              check_added_monitors!(nodes[1], 0);
 +              commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 +              expect_pending_htlcs_forwardable!(nodes[1]);
 +              expect_pending_htlcs_forwardable!(nodes[1]);
 +              check_added_monitors!(nodes[1], 1);
 +              let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +              assert!(updates.update_add_htlcs.is_empty());
 +              assert!(updates.update_fulfill_htlcs.is_empty());
 +              assert_eq!(updates.update_fail_htlcs.len(), 1);
 +              assert!(updates.update_fail_malformed_htlcs.is_empty());
 +              assert!(updates.update_fee.is_none());
 +              nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
 +              commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
 +              expect_payment_failed!(nodes[0], payment_hash, true);
 +
 +              // Finally, claim the original payment.
 +              claim_payment(&nodes[0], &expected_route, payment_preimage);
 +
 +              // To start (2), send a keysend payment but don't claim it.
 +              let payment_preimage = PaymentPreimage([42; 32]);
 +              let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
 +              let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              let event = events.pop().unwrap();
 +              let path = vec![&nodes[1]];
 +              pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
 +
 +              // Next, attempt a regular payment and make sure it fails.
 +              let payment_secret = PaymentSecret([43; 32]);
 +              nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              let ev = events.drain(..).next().unwrap();
 +              let payment_event = SendEvent::from_event(ev);
 +              nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 +              check_added_monitors!(nodes[1], 0);
 +              commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 +              expect_pending_htlcs_forwardable!(nodes[1]);
 +              expect_pending_htlcs_forwardable!(nodes[1]);
 +              check_added_monitors!(nodes[1], 1);
 +              let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +              assert!(updates.update_add_htlcs.is_empty());
 +              assert!(updates.update_fulfill_htlcs.is_empty());
 +              assert_eq!(updates.update_fail_htlcs.len(), 1);
 +              assert!(updates.update_fail_malformed_htlcs.is_empty());
 +              assert!(updates.update_fee.is_none());
 +              nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
 +              commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
 +              expect_payment_failed!(nodes[0], payment_hash, true);
 +
 +              // Finally, succeed the keysend payment.
 +              claim_payment(&nodes[0], &expected_route, payment_preimage);
 +      }
 +
 +      #[test]
 +      fn test_keysend_hash_mismatch() {
 +              // Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
 +              // preimage doesn't match the msg's payment hash.
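 +              // For a well-formed keysend HTLC the payment hash must commit to the preimage, i.e.
 +              // equal PaymentHash(Sha256::hash(&preimage.0).into_inner()); below we deliberately
 +              // send a hash which does not match the preimage we provide.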
 +              let chanmon_cfgs = create_chanmon_cfgs(2);
 +              let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +              let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +              let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +              let payer_pubkey = nodes[0].node.get_our_node_id();
 +              let payee_pubkey = nodes[1].node.get_our_node_id();
 +              nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
 +              nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
 +
 +              let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
 +              let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
 +              let first_hops = nodes[0].node.list_usable_channels();
 +              let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
 +                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
 +                                  nodes[0].logger).unwrap();
 +
 +              let test_preimage = PaymentPreimage([42; 32]);
 +              let mismatch_payment_hash = PaymentHash([43; 32]);
 +              let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +
 +              let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +              assert_eq!(updates.update_add_htlcs.len(), 1);
 +              assert!(updates.update_fulfill_htlcs.is_empty());
 +              assert!(updates.update_fail_htlcs.is_empty());
 +              assert!(updates.update_fail_malformed_htlcs.is_empty());
 +              assert!(updates.update_fee.is_none());
 +              nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 +
 +              nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
 +      }
 +
 +      #[test]
 +      fn test_keysend_msg_with_secret_err() {
 +              // Test that we error as expected if we receive a keysend payment that includes a payment secret.
 +              let chanmon_cfgs = create_chanmon_cfgs(2);
 +              let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +              let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +              let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +              let payer_pubkey = nodes[0].node.get_our_node_id();
 +              let payee_pubkey = nodes[1].node.get_our_node_id();
 +              nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
 +              nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
 +
 +              let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
 +              let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
 +              let first_hops = nodes[0].node.list_usable_channels();
 +              let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
 +                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
 +                                  nodes[0].logger).unwrap();
 +
 +              let test_preimage = PaymentPreimage([42; 32]);
 +              let test_secret = PaymentSecret([43; 32]);
 +              let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
 +              let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +
 +              let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +              assert_eq!(updates.update_add_htlcs.len(), 1);
 +              assert!(updates.update_fulfill_htlcs.is_empty());
 +              assert!(updates.update_fail_htlcs.is_empty());
 +              assert!(updates.update_fail_malformed_htlcs.is_empty());
 +              assert!(updates.update_fee.is_none());
 +              nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 +
 +              nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
        }
  }
  
@@@ -5514,13 -4975,13 +5517,13 @@@ pub mod bench 
        use routing::router::get_route;
        use util::test_utils;
        use util::config::UserConfig;
 -      use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 +      use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
  
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::{Block, BlockHeader, Transaction, TxOut};
  
 -      use std::sync::{Arc, Mutex};
 +      use sync::{Arc, Mutex};
  
        use test::Bencher;
  
                let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
  
                let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
 -              let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 +              let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
  
                let mut config: UserConfig = Default::default();
                config.own_channel_config.minimum_depth = 1;
                Listen::block_connected(&node_b, &block, 1);
  
                node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
 -              node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
 +              let msg_events = node_a.get_and_clear_pending_msg_events();
 +              assert_eq!(msg_events.len(), 2);
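 +              // We expect exactly two events: the SendFundingLocked for node_b, immediately
 +              // followed by a SendChannelUpdate for the now-usable channel.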
 +              match msg_events[0] {
 +                      MessageSendEvent::SendFundingLocked { ref msg, .. } => {
 +                              node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
 +                              get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
 +                      },
 +                      _ => panic!(),
 +              }
 +              match msg_events[1] {
 +                      MessageSendEvent::SendChannelUpdate { .. } => {},
 +                      _ => panic!(),
 +              }
  
                let dummy_graph = NetworkGraph::new(genesis_hash);
  
index 89b185699993838e120f1c3f80e9bee4b2928c05,a6d82ccee525ad164023620d0b59d409819c0692..d1d322bfa95f08b3906cea5d68975b66500fe5f9
@@@ -23,7 -23,7 +23,7 @@@ use ln::msgs::{ChannelMessageHandler,Ro
  use util::enforcing_trait_impls::EnforcingSigner;
  use util::test_utils;
  use util::test_utils::TestChainMonitor;
 -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
  use util::errors::APIError;
  use util::config::UserConfig;
  use util::ser::{ReadableArgs, Writeable, Readable};
@@@ -42,7 -42,7 +42,7 @@@ use bitcoin::secp256k1::key::PublicKey
  use prelude::*;
  use core::cell::RefCell;
  use std::rc::Rc;
 -use std::sync::{Arc, Mutex};
 +use sync::{Arc, Mutex};
  use core::mem;
  
  pub const CHAN_CONFIRM_DEPTH: u32 = 10;
@@@ -269,7 -269,7 +269,7 @@@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 
                        // Check that if we serialize and then deserialize all our channel monitors we get the
                        // same set of outputs to watch for on chain as we have now. Note that if we write
                        // tests that fully close channels and remove the monitors at some point this may break.
 -                      let feeest = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 +                      let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                        let mut deserialized_monitors = Vec::new();
                        {
                                let old_monitors = self.chain_monitor.chain_monitor.monitors.read().unwrap();
                                <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
                                        default_config: *self.node.get_current_default_configuration(),
                                        keys_manager: self.keys_manager,
 -                                      fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: 253 },
 +                                      fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
                                        chain_monitor: self.chain_monitor,
                                        tx_broadcaster: &test_utils::TestBroadcaster {
                                                txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
@@@ -976,16 -976,11 +976,16 @@@ macro_rules! expect_payment_received 
                let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
 -                      Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
 +                      Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
                                assert_eq!($expected_payment_hash, *payment_hash);
 -                              assert!(payment_preimage.is_none());
 -                              assert_eq!($expected_payment_secret, *payment_secret);
                                assert_eq!($expected_recv_value, amt);
 +                              match purpose {
 +                                      PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                              assert!(payment_preimage.is_none());
 +                                              assert_eq!($expected_payment_secret, *payment_secret);
 +                                      },
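 +                                      // Other purposes (e.g. spontaneous/keysend payments) carry no
 +                                      // payment_secret, so there is nothing further to check here.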
 +                                      _ => {},
 +                              }
                        },
                        _ => panic!("Unexpected event"),
                }
@@@ -1056,7 -1051,7 +1056,7 @@@ pub fn send_along_route_with_secret<'a
        pass_along_route(origin_node, expected_paths, recv_value, our_payment_hash, our_payment_secret);
  }
  
 -pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret, ev: MessageSendEvent, payment_received_expected: bool) {
 +pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_received_expected: bool, expected_preimage: Option<PaymentPreimage>) {
        let mut payment_event = SendEvent::from_event(ev);
        let mut prev_node = origin_node;
  
                        if payment_received_expected {
                                assert_eq!(events_2.len(), 1);
                                match events_2[0] {
 -                                      Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
 +                                      Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
                                                assert_eq!(our_payment_hash, *payment_hash);
 -                                              assert!(payment_preimage.is_none());
 -                                              assert_eq!(our_payment_secret, *payment_secret);
 +                                              match &purpose {
 +                                                      PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                                              assert_eq!(expected_preimage, *payment_preimage);
 +                                                              assert_eq!(our_payment_secret.unwrap(), *payment_secret);
 +                                                      },
 +                                                      PaymentPurpose::SpontaneousPayment(payment_preimage) => {
 +                                                              assert_eq!(expected_preimage.unwrap(), *payment_preimage);
 +                                                              assert!(our_payment_secret.is_none());
 +                                                      },
 +                                              }
                                                assert_eq!(amt, recv_value);
                                        },
                                        _ => panic!("Unexpected event"),
@@@ -1112,7 -1099,7 +1112,7 @@@ pub fn pass_along_route<'a, 'b, 'c>(ori
                // Once we've gotten through all the HTLCs, the last one should result in a
                // PaymentReceived (but each previous one should not!).
                let expect_payment = path_idx == expected_route.len() - 1;
 -              pass_along_path(origin_node, expected_path, recv_value, our_payment_hash.clone(), our_payment_secret, ev, expect_payment);
 +              pass_along_path(origin_node, expected_path, recv_value, our_payment_hash.clone(), Some(our_payment_secret), ev, expect_payment, None);
        }
  }
  
@@@ -1219,10 -1206,7 +1219,10 @@@ pub const TEST_FINAL_CLTV: u32 = 70
  pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
        let net_graph_msg_handler = &origin_node.net_graph_msg_handler;
        let logger = test_utils::TestLogger::new();
 -      let route = get_route(&origin_node.node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), recv_value, TEST_FINAL_CLTV, &logger).unwrap();
 +      let route = get_route(&origin_node.node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(),
 +              &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()),
 +              Some(&origin_node.node.list_usable_channels().iter().collect::<Vec<_>>()), &[],
 +              recv_value, TEST_FINAL_CLTV, &logger).unwrap();
        assert_eq!(route.paths.len(), 1);
        assert_eq!(route.paths[0].len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.paths[0].iter()) {
@@@ -1332,7 -1316,7 +1332,7 @@@ pub fn create_chanmon_cfgs(node_count: 
                        txn_broadcasted: Mutex::new(Vec::new()),
                        blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 0)])),
                };
 -              let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 +              let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                let logger = test_utils::TestLogger::with_id(format!("node {}", i));
                let persister = test_utils::TestPersister::new();
@@@ -1357,29 -1341,22 +1357,29 @@@ pub fn create_node_cfgs<'a>(node_count
        nodes
  }
  
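 +/// The default UserConfig our tests are written against. Tests that need a different knob can
 +/// start from this and override individual fields, e.g.
 +/// `let mut config = test_default_channel_config(); config.channel_options.forwarding_fee_base_msat = 239;`.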
 +pub fn test_default_channel_config() -> UserConfig {
 +      let mut default_config = UserConfig::default();
 +      // Set cltv_expiry_delta slightly lower to keep the final CLTV values inside one byte in our
 +      // tests so that our script-length checks don't fail (see ACCEPTED_HTLC_SCRIPT_WEIGHT).
 +      default_config.channel_options.cltv_expiry_delta = 6*6;
 +      default_config.channel_options.announced_channel = true;
 +      default_config.peer_channel_config_limits.force_announced_channel_preference = false;
 +      // When most of our tests were written, the default HTLC minimum was fixed at 1000.
 +      // It now defaults to 1, so we simply set it to the expected value here.
 +      default_config.own_channel_config.our_htlc_minimum_msat = 1000;
 +      default_config
 +}
 +
  pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg<'b>>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingSigner, &'a TestChainMonitor<'b>, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> {
        let mut chanmgrs = Vec::new();
        for i in 0..node_count {
 -              let mut default_config = UserConfig::default();
 -              // Set cltv_expiry_delta slightly lower to keep the final CLTV values inside one byte in our
 -              // tests so that our script-length checks don't fail (see ACCEPTED_HTLC_SCRIPT_WEIGHT).
 -              default_config.channel_options.cltv_expiry_delta = 6*6;
 -              default_config.channel_options.announced_channel = true;
 -              default_config.peer_channel_config_limits.force_announced_channel_preference = false;
 -              default_config.own_channel_config.our_htlc_minimum_msat = 1000; // sanitization being done by the sender, to exerce receiver logic we need to lift of limit
                let network = Network::Testnet;
                let params = ChainParameters {
                        network,
                        best_block: BestBlock::from_genesis(network),
                };
 -              let node = ChannelManager::new(cfgs[i].fee_estimator, &cfgs[i].chain_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger, cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, params);
 +              let node = ChannelManager::new(cfgs[i].fee_estimator, &cfgs[i].chain_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger, cfgs[i].keys_manager,
 +                      if node_config[i].is_some() { node_config[i].clone().unwrap() } else { test_default_channel_config() }, params);
                chanmgrs.push(node);
        }
  
@@@ -1606,20 -1583,18 +1606,20 @@@ macro_rules! handle_chan_reestablish_ms
                        let mut revoke_and_ack = None;
                        let mut commitment_update = None;
                        let order = if let Some(ev) = msg_events.get(idx) {
 -                              idx += 1;
                                match ev {
                                        &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
                                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                                revoke_and_ack = Some(msg.clone());
 +                                              idx += 1;
                                                RAACommitmentOrder::RevokeAndACKFirst
                                        },
                                        &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
                                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                                commitment_update = Some(updates.clone());
 +                                              idx += 1;
                                                RAACommitmentOrder::CommitmentFirst
                                        },
 +                                      &MessageSendEvent::SendChannelUpdate { .. } => RAACommitmentOrder::CommitmentFirst,
                                        _ => panic!("Unexpected event"),
                                }
                        } else {
                                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                                assert!(revoke_and_ack.is_none());
                                                revoke_and_ack = Some(msg.clone());
 +                                              idx += 1;
                                        },
                                        &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
                                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                                assert!(commitment_update.is_none());
                                                commitment_update = Some(updates.clone());
 +                                              idx += 1;
                                        },
 +                                      &MessageSendEvent::SendChannelUpdate { .. } => {},
                                        _ => panic!("Unexpected event"),
                                }
                        }
  
 +                      if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
 +                              assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 +                              assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
 +                      }
 +
                        (funding_locked, revoke_and_ack, commitment_update, order)
                }
        }
  
  /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
  /// for claims/fails they are separated out.
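+ /// In each tuple the `.0` element counts messages node_a expects to receive and `.1` those
+ /// node_b expects; pending_htlc_fails covers in-flight update_fail_htlcs, while
+ /// pending_cell_htlc_fails covers fails still sitting in the holding cell.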
- pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
+ pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
        node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
        let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
        node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
        }
  
        // We don't yet support both needing updates, as that would require a different commitment dance:
-       assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
-                       (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
+       assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_htlc_fails.0 == 0 &&
+                        pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
+                       (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_htlc_fails.1 == 0 &&
+                        pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
  
        for chan_msgs in resp_1.drain(..) {
                if send_funding_locked.0 {
                } else {
                        assert!(chan_msgs.1.is_none());
                }
-               if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
+               if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
                        let commitment_update = chan_msgs.2.unwrap();
                        if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
                                assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
                                assert!(commitment_update.update_add_htlcs.is_empty());
                        }
                        assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
-                       assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
+                       assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0 + pending_cell_htlc_fails.0);
                        assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
                        for update_add in commitment_update.update_add_htlcs {
                                node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add);
                } else {
                        assert!(chan_msgs.1.is_none());
                }
-               if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
+               if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
                        let commitment_update = chan_msgs.2.unwrap();
                        if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
                                assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
                        }
-                       assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
-                       assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
+                       assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
+                       assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.1 + pending_cell_htlc_fails.1);
                        assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
                        for update_add in commitment_update.update_add_htlcs {
                                node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add);
index d866693ffec93a73dc54ac085bedbf66db663da9,f296cb3a7e7b0e9bdec20803eac57d83237a9a25..a13b06d8d993f34a80f61e5ffe11a8876bf8ba66
@@@ -22,15 -22,13 +22,15 @@@ use ln::channel::{COMMITMENT_TX_BASE_WE
  use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
  use ln::channel::{Channel, ChannelError};
  use ln::{chan_utils, onion_utils};
 -use routing::router::{Route, RouteHop, get_route};
 +use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT;
 +use routing::router::{Route, RouteHop, RouteHint, RouteHintHop, get_route, get_keysend_route};
 +use routing::network_graph::RoutingFees;
  use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
  use ln::msgs;
  use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction};
  use util::enforcing_trait_impls::EnforcingSigner;
  use util::{byte_utils, test_utils};
 -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
  use util::errors::APIError;
  use util::ser::{Writeable, ReadableArgs};
  use util::config::UserConfig;
@@@ -54,7 -52,7 +54,7 @@@ use regex
  use prelude::*;
  use alloc::collections::BTreeSet;
  use core::default::Default;
 -use std::sync::{Arc, Mutex};
 +use sync::{Arc, Mutex};
  
  use ln::functional_test_utils::*;
  use ln::chan_utils::CommitmentTransaction;
@@@ -1041,8 -1039,7 +1041,8 @@@ fn do_test_shutdown_rebroadcast(recv_co
                nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
                node_0_2nd_shutdown
        } else {
 -              assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +              let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 +              assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
                nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
                get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
        };
@@@ -1585,7 -1582,7 +1585,7 @@@ fn test_fee_spike_violation_fails_htlc(
        let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
  
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
 -      let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3460001, &Some(payment_secret), cur_height).unwrap();
 +      let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3460001, &Some(payment_secret), cur_height, &None).unwrap();
        let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
        let msg = msgs::UpdateAddHTLC {
                channel_id: chan.2,
@@@ -1701,24 -1698,14 +1701,24 @@@ fn test_chan_reserve_violation_outbound
        // sending any above-dust amount would result in a channel reserve violation.
        // In this test we check that we would be prevented from sending an HTLC in
        // this situation.
 -      chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 6000 };
 -      chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 6000 };
 +      let feerate_per_kw = 253;
 +      chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
 +      chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 -      let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
  
 -      let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 4843000);
 +      let mut push_amt = 100_000_000;
 +      push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
 +      push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
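 +      // With the 253 sat/kW feerate above, and assuming the usual test constants
 +      // (COMMITMENT_TX_BASE_WEIGHT = 724, COMMITMENT_TX_WEIGHT_PER_HTLC = 172, and a 1%
 +      // holder-selected reserve of 1_000 sat on this 100_000 sat channel), this works out to
 +      // push_amt = 100_000_000 - 226_000 - 1_000_000 = 98_774_000 msat.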
 +
 +      let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known());
 +
 +      // Sending exactly enough to hit the reserve amount should be accepted
 +      let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
 +
 +      // However, one more HTLC would put us significantly over the reserve amount and should fail.
 +      let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
        unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
                assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@@ -1733,8 -1720,8 +1733,8 @@@ fn test_chan_reserve_violation_inbound_
        // to channel reserve violation. This close could also happen if the fee went
        // up a more realistic amount, but many HTLCs were outstanding at the time of
        // the update_add_htlc.
 -      chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 6000 };
 -      chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 6000 };
 +      chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) };
 +      chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) };
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
        let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
 -      let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 1000, &Some(payment_secret), cur_height).unwrap();
 +      let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 1000, &Some(payment_secret), cur_height, &None).unwrap();
        let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
        let msg = msgs::UpdateAddHTLC {
                channel_id: chan.2,
  fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
        // calculating our commitment transaction fee (this was previously broken).
 -      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let mut chanmon_cfgs = create_chanmon_cfgs(2);
 +      let feerate_per_kw = 253;
 +      chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
 +      chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
 +
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
        // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
        // transaction fee with 0 HTLCs (183 sats)).
 -      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98817000, InitFeatures::known(), InitFeatures::known());
 +      let mut push_amt = 100_000_000;
 +      push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT) / 1000 * 1000;
 +      push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known());
  
 -      let dust_amt = 329000; // Dust amount
 +      let dust_amt = crate::ln::channel::MIN_DUST_LIMIT_SATOSHIS * 1000
 +              + feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000 - 1;
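 +      // Assuming MIN_DUST_LIMIT_SATOSHIS = 330 and HTLC_SUCCESS_TX_WEIGHT = 703, dust_amt works
 +      // out to 330_000 + 177_000 - 1 = 506_999 msat: one msat below the smallest received HTLC
 +      // nodes[0] would consider non-dust (and thus fee-affecting).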
        // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
        // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
        // commitment transaction fee.
        let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
 +
 +      // One more than the dust amt should fail, however.
 +      let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1);
 +      unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
 +              assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
  }
  
  #[test]
@@@ -1872,7 -1846,7 +1872,7 @@@ fn test_chan_reserve_violation_inbound_
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
        let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
 -      let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route_2.paths[0], recv_value_2, &None, cur_height).unwrap();
 +      let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route_2.paths[0], recv_value_2, &None, cur_height, &None).unwrap();
        let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1);
        let msg = msgs::UpdateAddHTLC {
                channel_id: chan.2,
@@@ -1920,11 -1894,7 +1920,11 @@@ fn commit_tx_fee_msat(feerate: u32, num
  fn test_channel_reserve_holding_cell_htlcs() {
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 -      let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 +      // When this test was written, the default base fee floated based on the HTLC count.
 +      // It is now fixed, so we simply set the fee to the expected value here.
 +      let mut config = test_default_channel_config();
 +      config.channel_options.forwarding_fee_base_msat = 239;
 +      let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001, InitFeatures::known(), InitFeatures::known());
        let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001, InitFeatures::known(), InitFeatures::known());
                }}
        }
  
 -      let feemsat = 239; // somehow we know?
 +      let feemsat = 239; // set above
        let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
        let feerate = get_feerate!(nodes[0], chan_1.2);
  
        let events = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
        match events[0] {
 -              Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
 +              Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
                        assert_eq!(our_payment_hash_21, *payment_hash);
 -                      assert!(payment_preimage.is_none());
 -                      assert_eq!(our_payment_secret_21, *payment_secret);
                        assert_eq!(recv_value_21, amt);
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                      assert!(payment_preimage.is_none());
 +                                      assert_eq!(our_payment_secret_21, *payment_secret);
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        }
        match events[1] {
 -              Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
 +              Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
                        assert_eq!(our_payment_hash_22, *payment_hash);
 -                      assert!(payment_preimage.is_none());
 -                      assert_eq!(our_payment_secret_22, *payment_secret);
                        assert_eq!(recv_value_22, amt);
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                      assert!(payment_preimage.is_none());
 +                                      assert_eq!(our_payment_secret_22, *payment_secret);
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        }
@@@ -3421,7 -3381,7 +3421,7 @@@ fn fail_backward_pending_htlc_upon_chan
                let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
                let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
                let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap();
 -              let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(&route.paths[0], 50_000, &Some(payment_secret), current_height).unwrap();
 +              let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(&route.paths[0], 50_000, &Some(payment_secret), current_height, &None).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
                let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
  
@@@ -3577,7 -3537,7 +3577,7 @@@ fn test_dup_events_on_peer_disconnect(
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
  
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
  }
  
@@@ -3593,7 -3553,7 +3593,7 @@@ fn test_simple_peer_disconnect() 
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
        let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
        let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
        let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
        let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
        fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
  
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
        {
                let events = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 2);
@@@ -3719,19 -3679,19 +3719,19 @@@ fn do_test_drop_messages_peer_disconnec
                }
                // Even if the funding_locked messages get exchanged, as long as nothing further was
                // received on either side, both sides will need to resend them.
-               reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        } else if messages_delivered == 3 {
                // nodes[0] still wants its RAA + commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
        } else if messages_delivered == 4 {
                // nodes[0] still wants its commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        } else if messages_delivered == 5 {
                // nodes[1] still wants its final RAA
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
        } else if messages_delivered == 6 {
                // Everything was delivered...
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
  
        let events_1 = nodes[1].node.get_and_clear_pending_events();
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
        nodes[1].node.process_pending_htlc_forwards();
  
        let events_2 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_2.len(), 1);
        match events_2[0] {
 -              Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
 +              Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
                        assert_eq!(payment_hash_1, *payment_hash);
 -                      assert!(payment_preimage.is_none());
 -                      assert_eq!(payment_secret_1, *payment_secret);
                        assert_eq!(amt, 1000000);
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                      assert!(payment_preimage.is_none());
 +                                      assert_eq!(payment_secret_1, *payment_secret);
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        }
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
        if messages_delivered < 2 {
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
                if messages_delivered < 1 {
                        let events_4 = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events_4.len(), 1);
                }
        } else if messages_delivered == 2 {
                // nodes[0] still wants its RAA + commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
        } else if messages_delivered == 3 {
                // nodes[0] still wants its commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        } else if messages_delivered == 4 {
                // nodes[1] still wants its final RAA
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
        } else if messages_delivered == 5 {
                // Everything was delivered...
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
        // Channel should still work fine...
        let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
@@@ -3904,7 -3859,7 +3904,7 @@@ fn test_funding_peer_disconnect() 
                _ => panic!("Unexpected event"),
        }
  
-       reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
                _ => panic!("Unexpected event"),
        };
  
-       reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
        nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &funding_locked);
        nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
  
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
        // as_announcement should be re-generated exactly by broadcast_node_announcement.
        nodes[0].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new());
@@@ -4153,15 -4108,10 +4153,15 @@@ fn test_drop_messages_peer_disconnect_d
        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
        match events_5[0] {
 -              Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt: _, user_payment_id: _ } => {
 +              Event::PaymentReceived { ref payment_hash, ref purpose, .. } => {
                        assert_eq!(payment_hash_2, *payment_hash);
 -                      assert!(payment_preimage.is_none());
 -                      assert_eq!(payment_secret_2, *payment_secret);
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
 +                                      assert!(payment_preimage.is_none());
 +                                      assert_eq!(payment_secret_2, *payment_secret);
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        }
@@@ -4191,13 -4141,13 +4191,13 @@@ fn do_test_htlc_timeout(send_partial_mp
                // Use the utility function send_payment_along_path to send the payment with MPP data which
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
 -              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height).unwrap();
 +              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
                // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
                // hop should *not* yet generate any PaymentReceived event(s).
 -              pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, payment_secret, events.drain(..).next().unwrap(), false);
 +              pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
                our_payment_hash
        } else {
                route_payment(&nodes[0], &[&nodes[1]], 100000).1
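 
  Each onion-construction call site in this diff picks up a trailing &None argument. Assuming it is the optional keysend preimage threaded through payload building (consistent with the keysend tests added later in this diff), the extended helper would look roughly like:
 
        pub(super) fn build_onion_payloads(path: &Vec<RouteHop>, total_msat: u64,
                payment_secret_option: &Option<PaymentSecret>, starting_htlc_offset: u32,
                keysend_preimage: &Option<PaymentPreimage>)
                -> Result<(Vec<msgs::OnionHopData>, u64, u32), APIError> {
                // when keysend_preimage is Some, it is written into the final hop's payload
                /* ... */
        }
 
  Ordinary invoice payments pass &None here; only spontaneous sends supply a preimage.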
@@@ -4412,7 -4362,7 +4412,7 @@@ fn test_no_txn_manager_serialize_deseri
        nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
  
        logger = test_utils::TestLogger::new();
 -      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 +      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
        persister = test_utils::TestPersister::new();
        let keys_manager = &chanmon_cfgs[0].keys_manager;
        new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
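 
  TestFeeEstimator's sat_per_kw moves behind a Mutex so tests can vary the advertised feerate mid-test, as test_fee_failures does later in this diff. A sketch of the adjusted helper, assuming the trait surface is otherwise unchanged:
 
        pub struct TestFeeEstimator {
                pub sat_per_kw: Mutex<u32>,
        }
        impl chaininterface::FeeEstimator for TestFeeEstimator {
                fn get_est_sat_per_1000_weight(&self, _confirmation_target: ConfirmationTarget) -> u32 {
                        // read the current test feerate; tests mutate it through the Mutex
                        *self.sat_per_kw.lock().unwrap()
                }
        }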
@@@ -4627,7 -4577,7 +4627,7 @@@ fn test_manager_serialize_deserialize_e
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
  
 -      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 +      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
        logger = test_utils::TestLogger::new();
        persister = test_utils::TestPersister::new();
        let keys_manager = &chanmon_cfgs[0].keys_manager;
@@@ -4715,7 -4665,7 +4715,7 @@@ fn test_simple_manager_serialize_deseri
        nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
  
        logger = test_utils::TestLogger::new();
 -      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 +      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
        persister = test_utils::TestPersister::new();
        let keys_manager = &chanmon_cfgs[0].keys_manager;
        new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
  
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
        fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
        claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
@@@ -4795,7 -4745,7 +4795,7 @@@ fn test_manager_serialize_deserialize_i
        }
  
        logger = test_utils::TestLogger::new();
 -      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 +      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
        persister = test_utils::TestPersister::new();
        let keys_manager = &chanmon_cfgs[0].keys_manager;
        new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
        nodes[0].node = &nodes_0_deserialized;
  
        // nodes[1] and nodes[2] have no lost state with nodes[0]...
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-       reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        //... and we can even still claim the payment!
        claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
  
@@@ -5393,12 -5343,7 +5393,12 @@@ fn test_duplicate_payment_hash_one_fail
        // we forward one of the payments onwards to D.
        let chanmon_cfgs = create_chanmon_cfgs(4);
        let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
 -      let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 +      // When this test was written, the default base fee floated based on the HTLC count.
 +      // It is now fixed, so we simply set the fee to the expected value here.
 +      let mut config = test_default_channel_config();
 +      config.channel_options.forwarding_fee_base_msat = 196;
 +      let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
 +              &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
  
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
@@@ -5585,12 -5530,7 +5585,12 @@@ fn do_test_fail_backwards_unrevoked_rem
        // And test where C fails back to A/B when D announces its latest commitment transaction
        let chanmon_cfgs = create_chanmon_cfgs(6);
        let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
 -      let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
 +      // When this test was written, the default base fee floated based on the HTLC count.
 +      // It is now fixed, so we simply set the fee to the expected value here.
 +      let mut config = test_default_channel_config();
 +      config.channel_options.forwarding_fee_base_msat = 196;
 +      let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
 +              &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
        let logger = test_utils::TestLogger::new();
  
@@@ -6440,11 -6380,7 +6440,11 @@@ fn test_free_and_fail_holding_cell_htlc
  fn test_fail_holding_cell_htlc_upon_free_multihop() {
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 -      let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 +      // When this test was written, the default base fee floated based on the HTLC count.
 +      // It is now fixed, so we simply set the fee to the expected value here.
 +      let mut config = test_default_channel_config();
 +      config.channel_options.forwarding_fee_base_msat = 196;
 +      let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
        let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
@@@ -6817,7 -6753,7 +6817,7 @@@ fn test_update_add_htlc_bolt2_receiver_
  
        let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
        let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
 -      let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3999999, &Some(our_payment_secret), cur_height).unwrap();
 +      let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3999999, &Some(our_payment_secret), cur_height, &None).unwrap();
        let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
  
        let mut msg = msgs::UpdateAddHTLC {
@@@ -7662,7 -7598,7 +7662,7 @@@ fn test_user_configurable_csv_delay() 
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
  
        // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
 -      if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, &low_our_to_self_config) {
 +      if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, &low_our_to_self_config) {
                match error {
                        APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
                        _ => panic!("Unexpected event"),
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
 -      if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
 +      if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
                match error {
                        ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
                        _ => panic!("Unexpected event"),
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
 -      if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
 +      if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
                match error {
                        ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
                        _ => panic!("Unexpected event"),
@@@ -7749,7 -7685,7 +7749,7 @@@ fn test_data_loss_protect() 
        let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
        chain_source = test_utils::TestChainSource::new(Network::Testnet);
        tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
 -      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 +      fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
        persister = test_utils::TestPersister::new();
        monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
        node_state_0 = {
@@@ -7960,168 -7896,6 +7960,168 @@@ fn test_announce_disable_channels() 
        }
  }
  
 +#[test]
 +fn test_priv_forwarding_rejection() {
 +      // If we have a private channel with outbound liquidity, and
 +      // UserConfig::accept_forwards_to_priv_channels is set to false, we should reject any attempts
 +      // to forward through that channel.
 +      let chanmon_cfgs = create_chanmon_cfgs(3);
 +      let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 +      let mut no_announce_cfg = test_default_channel_config();
 +      no_announce_cfg.channel_options.announced_channel = false;
 +      no_announce_cfg.accept_forwards_to_priv_channels = false;
 +      let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(no_announce_cfg), None]);
 +      let persister: test_utils::TestPersister;
 +      let new_chain_monitor: test_utils::TestChainMonitor;
 +      let nodes_1_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
 +      let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 +
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known());
 +
 +      // Note that the create_*_chan functions in utils require announcement_signatures, which we do
 +      // not send for private channels.
 +      nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
 +      let open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[2].node.get_our_node_id());
 +      nodes[2].node.handle_open_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel);
 +      let accept_channel = get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_accept_channel(&nodes[2].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
 +
 +      let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[1], 1_000_000, 42);
 +      nodes[1].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
 +      nodes[2].node.handle_funding_created(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, nodes[2].node.get_our_node_id()));
 +      check_added_monitors!(nodes[2], 1);
 +
 +      nodes[1].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id()));
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1);
 +      confirm_transaction_at(&nodes[1], &tx, conf_height);
 +      connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH - 1);
 +      confirm_transaction_at(&nodes[2], &tx, conf_height);
 +      connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1);
 +      let as_funding_locked = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[2].node.get_our_node_id());
 +      nodes[1].node.handle_funding_locked(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
 +      get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
 +      nodes[2].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &as_funding_locked);
 +      get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 +
 +      assert!(nodes[0].node.list_usable_channels()[0].is_public);
 +      assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
 +      assert!(!nodes[2].node.list_usable_channels()[0].is_public);
 +
 +      // We should always be able to forward through nodes[1] as long as it's out through a public
 +      // channel:
 +      send_payment(&nodes[2], &[&nodes[1], &nodes[0]], 10_000);
 +
 +      // ... however, if we send to nodes[2], we will have to pass the private channel from nodes[1]
 +      // to nodes[2], which should be rejected:
 +      let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
 +      let route = get_route(&nodes[0].node.get_our_node_id(),
 +              &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
 +              &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None,
 +              &[&RouteHint(vec![RouteHintHop {
 +                      src_node_id: nodes[1].node.get_our_node_id(),
 +                      short_channel_id: nodes[2].node.list_channels()[0].short_channel_id.unwrap(),
 +                      fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 },
 +                      cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
 +                      htlc_minimum_msat: None,
 +                      htlc_maximum_msat: None,
 +              }])], 10_000, TEST_FINAL_CLTV, nodes[0].logger).unwrap();
 +
 +      nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 +      commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
 +
 +      let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      assert!(htlc_fail_updates.update_add_htlcs.is_empty());
 +      assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1);
 +      assert!(htlc_fail_updates.update_fail_malformed_htlcs.is_empty());
 +      assert!(htlc_fail_updates.update_fee.is_none());
 +
 +      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
 +      commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, true, true);
 +      expect_payment_failed!(nodes[0], our_payment_hash, false);
 +      expect_payment_failure_chan_update!(nodes[0], nodes[2].node.list_channels()[0].short_channel_id.unwrap(), true);
 +
 +      // Now disconnect nodes[1] from its peers and restart with accept_forwards_to_priv_channels set
 +      // to true. Sadly there is currently no way to change it at runtime.
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +
 +      let nodes_1_serialized = nodes[1].node.encode();
 +      let mut monitor_a_serialized = test_utils::TestVecWriter(Vec::new());
 +      let mut monitor_b_serialized = test_utils::TestVecWriter(Vec::new());
 +      {
 +              let mons = nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap();
 +              let mut mon_iter = mons.iter();
 +              mon_iter.next().unwrap().1.write(&mut monitor_a_serialized).unwrap();
 +              mon_iter.next().unwrap().1.write(&mut monitor_b_serialized).unwrap();
 +      }
 +
 +      persister = test_utils::TestPersister::new();
 +      let keys_manager = &chanmon_cfgs[1].keys_manager;
 +      new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
 +      nodes[1].chain_monitor = &new_chain_monitor;
 +
 +      let mut monitor_a_read = &monitor_a_serialized.0[..];
 +      let mut monitor_b_read = &monitor_b_serialized.0[..];
 +      let (_, mut monitor_a) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_a_read, keys_manager).unwrap();
 +      let (_, mut monitor_b) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_b_read, keys_manager).unwrap();
 +      assert!(monitor_a_read.is_empty());
 +      assert!(monitor_b_read.is_empty());
 +
 +      no_announce_cfg.accept_forwards_to_priv_channels = true;
 +
 +      let mut nodes_1_read = &nodes_1_serialized[..];
 +      let (_, nodes_1_deserialized_tmp) = {
 +              let mut channel_monitors = HashMap::new();
 +              channel_monitors.insert(monitor_a.get_funding_txo().0, &mut monitor_a);
 +              channel_monitors.insert(monitor_b.get_funding_txo().0, &mut monitor_b);
 +              <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_1_read, ChannelManagerReadArgs {
 +                      default_config: no_announce_cfg,
 +                      keys_manager,
 +                      fee_estimator: node_cfgs[1].fee_estimator,
 +                      chain_monitor: nodes[1].chain_monitor,
 +                      tx_broadcaster: nodes[1].tx_broadcaster.clone(),
 +                      logger: nodes[1].logger,
 +                      channel_monitors,
 +              }).unwrap()
 +      };
 +      assert!(nodes_1_read.is_empty());
 +      nodes_1_deserialized = nodes_1_deserialized_tmp;
 +
 +      assert!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a).is_ok());
 +      assert!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b).is_ok());
 +      check_added_monitors!(nodes[1], 2);
 +      nodes[1].node = &nodes_1_deserialized;
 +
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
 +      let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 +      let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 +      nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
 +      get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 +      get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
 +
 +      nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
 +      nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
 +      let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[2].node.get_our_node_id());
 +      let cs_reestablish = get_event_msg!(nodes[2], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 +      nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
 +      nodes[1].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &cs_reestablish);
 +      get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
 +      get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 +
 +      nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 10_000, our_payment_hash, our_payment_secret);
 +      claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage);
 +}
 +
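  The only knob under test above is a single boolean, but flipping it requires the full serialize/reload dance because the config is consumed at ChannelManager construction. In isolation the flip amounts to:
 
        let mut no_announce_cfg = test_default_channel_config();
        no_announce_cfg.channel_options.announced_channel = false;
        no_announce_cfg.accept_forwards_to_priv_channels = false; // private-channel forwards rejected
        // ... serialize nodes[1], then reload it via ChannelManagerReadArgs with:
        no_announce_cfg.accept_forwards_to_priv_channels = true;  // forwards now permitted
 
  which is exactly the sequence the test performs around its monitor and manager round-trip.
 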
  #[test]
  fn test_bump_penalty_txn_on_revoked_commitment() {
        // In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to be sure
@@@ -8668,14 -8442,9 +8668,14 @@@ fn test_preimage_storage() 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
 -              Event::PaymentReceived { payment_preimage, user_payment_id, .. } => {
 -                      assert_eq!(user_payment_id, 42);
 -                      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
 +              Event::PaymentReceived { ref purpose, .. } => {
 +                      match &purpose {
 +                              PaymentPurpose::InvoicePayment { payment_preimage, user_payment_id, .. } => {
 +                                      assert_eq!(*user_payment_id, 42);
 +                                      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
 +                              },
 +                              _ => panic!("expected PaymentPurpose::InvoicePayment")
 +                      }
                },
                _ => panic!("Unexpected event"),
        }
@@@ -8739,7 -8508,7 +8739,7 @@@ fn test_secret_timeout() 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
 -              Event::PaymentReceived { payment_preimage, payment_secret, user_payment_id, .. } => {
 +              Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, user_payment_id }, .. } => {
                        assert!(payment_preimage.is_none());
                        assert_eq!(user_payment_id, 42);
                        assert_eq!(payment_secret, our_payment_secret);
@@@ -9594,65 -9363,8 +9594,65 @@@ fn do_test_tx_confirmed_skipping_blocks
                expect_payment_failure_chan_update!(nodes[0], chan_announce.contents.short_channel_id, true);
        }
  }
 +
  #[test]
  fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
        do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
        do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
  }
 +
 +#[test]
 +fn test_keysend_payments_to_public_node() {
 +      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +      let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +      let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
 +      let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
 +      let payer_pubkey = nodes[0].node.get_our_node_id();
 +      let payee_pubkey = nodes[1].node.get_our_node_id();
 +      let route = get_route(&payer_pubkey, &network_graph, &payee_pubkey, None,
 +                        None, &vec![], 10000, 40,
 +                        nodes[0].logger).unwrap();
 +
 +      let test_preimage = PaymentPreimage([42; 32]);
 +      let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let event = events.pop().unwrap();
 +      let path = vec![&nodes[1]];
 +      pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
 +      claim_payment(&nodes[0], &path, test_preimage);
 +}
 +
 +#[test]
 +fn test_keysend_payments_to_private_node() {
 +      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +      let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +      let payer_pubkey = nodes[0].node.get_our_node_id();
 +      let payee_pubkey = nodes[1].node.get_our_node_id();
 +      nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
 +      nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
 +
 +      let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
 +      let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
 +      let first_hops = nodes[0].node.list_usable_channels();
 +      let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
 +                                Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
 +                                nodes[0].logger).unwrap();
 +
 +      let test_preimage = PaymentPreimage([42; 32]);
 +      let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let event = events.pop().unwrap();
 +      let path = vec![&nodes[1]];
 +      pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
 +      claim_payment(&nodes[0], &path, test_preimage);
 +}
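 
  Both keysend tests hand pass_along_path a None payment secret and Some(test_preimage), since spontaneous payments deliver the preimage inside the onion. On the receiving side the claim needs no stored secret; a hedged sketch, assuming this era's bool-returning claim_funds(preimage):
 
        match events.pop().unwrap() {
                Event::PaymentReceived { purpose: PaymentPurpose::SpontaneousPayment(preimage), .. } => {
                        // the sender's preimage arrived in the final onion hop, so claim directly
                        assert!(nodes[1].node.claim_funds(preimage));
                },
                _ => panic!("Unexpected event"),
        }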
index 0f8c90c87aa1fec58d296c140ca32f57e02d569d,39a3735353ebfec1864638fd3386af9e5239c77f..5ddc5933f8d7d1810aa727dad5381372c53a71fc
  
  use chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
  use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 -use ln::channelmanager::HTLCForwardInfo;
 +use ln::channelmanager::{HTLCForwardInfo, CLTV_FAR_FAR_AWAY};
  use ln::onion_utils;
  use routing::router::{Route, get_route};
  use ln::features::{InitFeatures, InvoiceFeatures};
  use ln::msgs;
 -use ln::msgs::{ChannelMessageHandler, HTLCFailChannelUpdate, OptionalField};
 +use ln::msgs::{ChannelMessageHandler, ChannelUpdate, HTLCFailChannelUpdate, OptionalField};
  use util::test_utils;
  use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
  use util::ser::{Writeable, Writer};
@@@ -29,7 -29,6 +29,7 @@@ use bitcoin::hash_types::BlockHash
  use bitcoin::hashes::sha256::Hash as Sha256;
  use bitcoin::hashes::Hash;
  
 +use bitcoin::secp256k1;
  use bitcoin::secp256k1::Secp256k1;
  use bitcoin::secp256k1::key::SecretKey;
  
@@@ -241,58 -240,17 +241,58 @@@ impl Writeable for BogusOnionHopData 
        }
  }
  
 +const BADONION: u16 = 0x8000;
 +const PERM: u16 = 0x4000;
 +const NODE: u16 = 0x2000;
 +const UPDATE: u16 = 0x1000;
 +
  #[test]
 -fn test_onion_failure() {
 -      use ln::msgs::ChannelUpdate;
 -      use ln::channelmanager::CLTV_FAR_FAR_AWAY;
 -      use bitcoin::secp256k1;
 +fn test_fee_failures() {
 +      // Tests that the fee required when forwarding remains consistent over time. This was
 +      // previously broken, with forwarding fees floating based on the fee estimator at the time of
 +      // forwarding.
 +      //
 +      // When this test was written, the default base fee floated based on the HTLC count.
 +      // It is now fixed, so we simply set the fee to the expected value here.
 +      let mut config = test_default_channel_config();
 +      config.channel_options.forwarding_fee_base_msat = 196;
 +
 +      let chanmon_cfgs = create_chanmon_cfgs(3);
 +      let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
 +      let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 +      let channels = [create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()), create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known())];
 +      let logger = test_utils::TestLogger::new();
 +      let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 40_000, TEST_FINAL_CLTV, &logger).unwrap();
 +
 +      // positive case
 +      let (payment_preimage_success, payment_hash_success, payment_secret_success) = get_payment_preimage_hash!(nodes[2]);
 +      nodes[0].node.send_payment(&route, payment_hash_success, &Some(payment_secret_success)).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 40_000, payment_hash_success, payment_secret_success);
 +      claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_success);
 +
 +      let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]);
 +      run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| {
 +              msg.amount_msat -= 1;
 +      }, || {}, true, Some(UPDATE|12), Some(msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id: channels[0].0.contents.short_channel_id, is_permanent: true}));
  
 -      const BADONION: u16 = 0x8000;
 -      const PERM: u16 = 0x4000;
 -      const NODE: u16 = 0x2000;
 -      const UPDATE: u16 = 0x1000;
 +      // In an earlier version, we spuriously failed to forward payments if the expected feerate
 +      // changed between the channel open and the payment.
 +      {
 +              let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
 +              *feerate_lock *= 2;
 +      }
  
 +      let (payment_preimage_success, payment_hash_success, payment_secret_success) = get_payment_preimage_hash!(nodes[2]);
 +      nodes[0].node.send_payment(&route, payment_hash_success, &Some(payment_secret_success)).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 40_000, payment_hash_success, payment_secret_success);
 +      claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_success);
 +}
 +
 +#[test]
 +fn test_onion_failure() {
        // When we check for amount_below_minimum below, we want to test that we're using the *right*
        // amount, thus we need different htlc_minimum_msat values. We set node[2]'s htlc_minimum_msat
        // to 2000, which is above the default value of 1000 set in create_node_chanmgrs.
        node_2_cfg.channel_options.announced_channel = true;
        node_2_cfg.peer_channel_config_limits.force_announced_channel_preference = false;
  
 +      // When this test was written, the default base fee floated based on the HTLC count.
 +      // It is now fixed, so we simply set the fee to the expected value here.
 +      let mut config = test_default_channel_config();
 +      config.channel_options.forwarding_fee_base_msat = 196;
 +
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 -      let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, Some(node_2_cfg)]);
 +      let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(node_2_cfg)]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        for node in nodes.iter() {
                *node.keys_manager.override_session_priv.lock().unwrap() = Some([3; 32]);
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let cur_height = nodes[0].best_block_info().1 + 1;
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
 -              let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 40000, &None, cur_height).unwrap();
 +              let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 40000, &None, cur_height, &None).unwrap();
                let mut new_payloads = Vec::new();
                for payload in onion_payloads.drain(..) {
                        new_payloads.push(BogusOnionHopData::new(payload));
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let cur_height = nodes[0].best_block_info().1 + 1;
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
 -              let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 40000, &None, cur_height).unwrap();
 +              let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 40000, &None, cur_height, &None).unwrap();
                let mut new_payloads = Vec::new();
                for payload in onion_payloads.drain(..) {
                        new_payloads.push(BogusOnionHopData::new(payload));
                nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
                nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        }, true, Some(UPDATE|20), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
-       reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
        run_onion_failure_test("expiry_too_far", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| {
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let height = nodes[2].best_block_info().1;
                route.paths[0][1].cltv_expiry_delta += CLTV_FAR_FAR_AWAY + route.paths[0][0].cltv_expiry_delta + 1;
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
 -              let (onion_payloads, _, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 40000, &None, height).unwrap();
 +              let (onion_payloads, _, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 40000, &None, height, &None).unwrap();
                let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
                msg.cltv_expiry = htlc_cltv;
                msg.onion_routing_packet = onion_packet;