Merge pull request #1619 from G8XSU/main
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Wed, 3 Aug 2022 17:37:51 +0000 (17:37 +0000)
committer GitHub <noreply@github.com>
Wed, 3 Aug 2022 17:37:51 +0000 (17:37 +0000)
Add config support for 'their_channel_reserve_proportional_millionths' [#1498]

53 files changed:
.github/workflows/build.yml
CHANGELOG.md
CONTRIBUTING.md
README.md
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
lightning-background-processor/Cargo.toml
lightning-background-processor/src/lib.rs
lightning-block-sync/Cargo.toml
lightning-block-sync/src/poll.rs
lightning-invoice/Cargo.toml
lightning-invoice/src/payment.rs
lightning-net-tokio/Cargo.toml
lightning-net-tokio/src/lib.rs
lightning-persister/Cargo.toml
lightning-rapid-gossip-sync/Cargo.toml
lightning-rapid-gossip-sync/src/lib.rs
lightning-rapid-gossip-sync/src/processing.rs
lightning/Cargo.toml
lightning/src/chain/chaininterface.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/keysinterface.rs
lightning/src/chain/mod.rs
lightning/src/chain/package.rs
lightning/src/debug_sync.rs
lightning/src/lib.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/mod.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/onion_utils.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/onion_message/blinded_route.rs [new file with mode: 0644]
lightning/src/onion_message/functional_tests.rs [new file with mode: 0644]
lightning/src/onion_message/messenger.rs [new file with mode: 0644]
lightning/src/onion_message/mod.rs [new file with mode: 0644]
lightning/src/onion_message/packet.rs [new file with mode: 0644]
lightning/src/onion_message/utils.rs [new file with mode: 0644]
lightning/src/routing/gossip.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/util/events.rs
lightning/src/util/ser.rs
lightning/src/util/test_utils.rs

index 68cf376235f7edcb1e812da6027da9e608e97ed2..cfb1b9c847420dc0cb4ee0edb9188033a738e26f 100644 (file)
@@ -232,14 +232,14 @@ jobs:
           EXPECTED_ROUTING_GRAPH_SNAPSHOT_SHASUM: 05a5361278f68ee2afd086cc04a1f927a63924be451f3221d380533acfacc303
       - name: Fetch rapid graph sync reference input
         run: |
-          curl --verbose -L -o lightning-rapid-gossip-sync/res/full_graph.lngossip https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin
+          curl --verbose -L -o lightning-rapid-gossip-sync/res/full_graph.lngossip https://bitcoin.ninja/ldk-compressed_graph-285cb27df79-2022-07-21.bin
           echo "Sha sum: $(sha256sum lightning-rapid-gossip-sync/res/full_graph.lngossip | awk '{ print $1 }')"
           if [ "$(sha256sum lightning-rapid-gossip-sync/res/full_graph.lngossip | awk '{ print $1 }')" != "${EXPECTED_RAPID_GOSSIP_SHASUM}" ]; then
             echo "Bad hash"
             exit 1
           fi
         env:
-          EXPECTED_RAPID_GOSSIP_SHASUM: 9637b91cea9d64320cf48fc0787c70fe69fc062f90d3512e207044110cadfd7b
+          EXPECTED_RAPID_GOSSIP_SHASUM: e0f5d11641c11896d7af3a2246d3d6c3f1720b7d2d17aab321ecce82e6b7deb8
       - name: Test with Network Graph on Rust ${{ matrix.toolchain }}
         run: |
           cd lightning
index 961b1121196a94b76f0fac5ec76c1613d83858c0..4618446397540439cd43357a794da30bcadcdf5d 100644 (file)
@@ -1,3 +1,60 @@
+# 0.0.110 - 2022-07-26
+
+## API Updates
+ * `ChannelManager::send_probe` and `Score::probe_{failed,successful}` have
+   been added to make probing more explicit, as well as new
+   `Event::Probe{Failed,Successful}` events (#1567).
+ * `ProbabilisticScoringParameters::banned_nodes` has been renamed
+   `manual_node_penalties` and changed to take msat penalties (#1592).
+ * Per-payment tracking of failed paths was added to enable configuration of
+   `ProbabilisticScoringParameters::considered_impossible_penalty_msat` (#1600).
+ * `ProbabilisticScoringParameters::base_penalty_amount_multiplier_msat` was
+   added to allow a penalty that is only amount-dependent (#1617).
+ * `ProbabilisticScoringParameters::amount_penalty_multiplier_msat` was renamed
+   `liquidity_penalty_amount_multiplier_msat` (#1617).
+ * A new `Event::HTLCHandlingFailed` has been added which provides visibility
+   into failures to forward/claim accepted HTLCs (#1403).
+ * Support has been added for DNS hostnames in the `NetAddress` type, see
+   [BOLT PR #911](https://github.com/lightning/bolts/pull/911) (#1553).
+ * `GossipSync` now has `rapid`, `p2p`, and `none` constructors (#1618).
+ * `lightning-net-tokio` no longer requires types to be in `Arc`s (#1623).
+ * The `htlc_maximum_msat` field is now required in `ChannelUpdate` gossip
+   messages. In tests this rejects < 1% of channels (#1519).
+ * `ReadOnlyNetworkGraph::{channel,node}` have been added to query for
+   individual channel/node data, primarily for bindings users (#1543).
+ * `FeeEstimator` implementations are now wrapped internally to ensure values
+   below 253 sats/kW are never used (#1552).
+ * Route selection no longer attempts to randomize path selection. This is
+   unlikely to lead to a material change in the paths selected (#1610).
+
+## Bug Fixes
+ * Fixed a panic when deserializing `ChannelDetails` objects (#1588).
+ * When routing, channels are no longer fully saturated before MPP splits are
+   generated; instead, a configuration knob was added as
+   `PaymentParameters::max_channel_saturation_power_of_half` (#1605).
+ * Fixed a panic which occurred in `ProbabilisticScorer` when wallclock time
+   goes backwards across a restart (#1603).
+
+## Serialization Compatibility
+ * All new fields are ignored by prior versions of LDK. None of the new fields
+   are present when reading objects serialized by prior versions of LDK.
+ * Channel information written in the `NetworkGraph` which is missing
+   `htlc_maximum_msat` may be dropped on deserialization (#1519).
+ * Similarly, node information written in the `NetworkGraph` which contains an
+   invalid hostname may be dropped on deserialization (#1519).
+
+In total, this release features 79 files changed, 2935 insertions, 1363
+deletions in 52 commits from 9 authors, in alphabetical order:
+ * Duncan Dean
+ * Elias Rohrer
+ * Jeffrey Czyz
+ * Matt Corallo
+ * Max Fang
+ * Viktor Tigerström
+ * Willem Van Lint
+ * Wilmer Paulino
+ * jurvis
+
 # 0.0.109 - 2022-07-01
 
 ## API Updates
index 93666dd22970d0bbeb2dedc206fa2f484d956586..f7cf8c4e699e3578285abbdbe3c72b8a0f06759b 100644 (file)
@@ -1,27 +1,35 @@
 Contributing to Rust-Lightning
 ==============================
 
-The Rust-Lightning project operates an open contributor model where anyone is
-welcome to contribute towards development in the form of peer review, documentation,
-testing and patches.
-
-Anyone is invited to contribute without regard to technical experience, "expertise", OSS
-experience, age, or other concern. However, the development of cryptocurrencies demands a
-high-level of rigor, adversarial thinking, thorough testing and risk-minimization.
-Any bug may cost users real money. That being said, we deeply welcome people contributing
-for the first time to an open source project or pick up Rust while contributing. Don't be shy,
-you'll learn.
-
-Communications Channels
+The `rust-lightning` project operates an open contributor model where anyone is
+welcome to contribute towards development in the form of peer review,
+documentation, testing and patches.
+
+Anyone is invited to contribute without regard to technical experience,
+"expertise", OSS experience, age, or other concern. However, the development of
+cryptocurrencies demands a high-level of rigor, adversarial thinking, thorough
+testing and risk-minimization. Any bug may cost users real money. That being
+said, we deeply welcome people contributing for the first time to an open source
+project or picking up Rust while contributing. Don't be shy, you'll learn.
+
+Communication Channels
 -----------------------
 
-Communication about Rust-Lightning happens primarily on #ldk-dev on the
-[LDK slack](http://www.lightningdevkit.org/), but also #rust-bitcoin on IRC Freenode.
+Communication about the development of LDK and `rust-lightning` happens
+primarily on the [LDK Discord](https://discord.gg/5AcknnMfBw) in the `#ldk-dev`
+channel. Additionally, live LDK development meetings are held every other
+Monday at 19:00 UTC in the [LDK Dev Jitsi Meeting
+Room](https://meet.jit.si/ldkdevmeeting). Upcoming events can be found in the
+[LDK calendar](https://calendar.google.com/calendar/embed?src=c_e6fv6vlshbpoob2mmbvblkkoj4%40group.calendar.google.com).
+
+Contributors starting out with the Rust language are welcome to discuss and ask
+for help in the `#rust-101` channel on LDK Discord.
 
 Discussion about code base improvements happens in GitHub issues and on pull
 requests.
 
-Major projects are tracked [here](https://github.com/lightningdevkit/rust-lightning/projects).
+The LDK roadmap is tracked [here](https://github.com/orgs/lightningdevkit/projects/2).
+
 Major milestones are tracked [here](https://github.com/lightningdevkit/rust-lightning/milestones?direction=asc&sort=title&state=open).
 
 Getting Started
@@ -29,25 +37,28 @@ Getting Started
 
 First and foremost, start small.
 
-This doesn't mean don't be ambitious with the breadth and depth of your contributions but rather
-understand the project culture before investing an asymmetric number of hours on
-development compared to your merged work.
+This doesn't mean you shouldn't be ambitious with the breadth and depth of your
+contributions, but rather that you should understand the project culture before
+investing an asymmetric number of hours on development compared to your merged work.
 
 Browsing through the [meeting minutes](https://github.com/lightningdevkit/rust-lightning/wiki/Meetings)
-is a good first step. You will learn who is working on what, how releases are drafted, what are the
-pending tasks to deliver, where you can contribute review bandwidth, etc.
+is a good first step. You will learn who is working on what, how releases are
+drafted, what tasks are pending delivery, where you can contribute review
+bandwidth, etc.
 
-Even if you have an extensive open source background or sound software engineering skills, consider
-that the reviewers' comprehension of the code is as much important as technical correctness.
+Even if you have an extensive open source background or sound software
+engineering skills, consider that the reviewers' comprehension of the code is as
+important as technical correctness.
 
-It's very welcome to ask for review, either on IRC or LDK Slack. And also for reviewers, it's nice
-to provide timelines when you hope to fulfill the request while bearing in mind for both sides that's
-a "soft" commitment.
+Asking for review on LDK Discord is very welcome. It's also nice for reviewers
+to provide timelines for when they hope to fulfill a request, while bearing in
+mind for both sides that this is a "soft" commitment.
 
-If you're eager to increase the velocity of the dev process, reviewing other contributors work is
-the best you can do while waiting review on yours.
+If you're eager to increase the velocity of the dev process, reviewing other
+contributors' work is the best thing you can do while waiting for review on yours.
 
-Also, getting familiar with the [glossary](GLOSSARY.md) will streamline discussions with regular contributors.
+Also, getting familiar with the [glossary](GLOSSARY.md) will streamline
+discussions with regular contributors.
 
 Contribution Workflow
 ---------------------
@@ -80,14 +91,14 @@ our GitHub Actions). Also, the compatibility for LDK object serialization is
 currently ensured back to and including crate version 0.0.99 (see the
 [changelog](CHANGELOG.md)).
 
-Commits should cover both the issue fixed and the solution's rationale.
-These [guidelines](https://chris.beams.io/posts/git-commit/) should be kept in mind.
+Commits should cover both the issue fixed and the solution's rationale. These
+[guidelines](https://chris.beams.io/posts/git-commit/) should be kept in mind.
 
-To facilitate communication with other contributors, the project is making use of
-GitHub's "assignee" field. First check that no one is assigned and then comment
-suggesting that you're working on it. If someone is already assigned, don't hesitate
-to ask if the assigned party or previous commenters are still working on it if it has
-been awhile.
+To facilitate communication with other contributors, the project is making use
+of GitHub's "assignee" field. First check that no one is assigned and then
+comment suggesting that you're working on it. If someone is already assigned,
+don't hesitate to ask if the assigned party or previous commenters are still
+working on it if it has been a while.
 
 Peer review
 -----------
@@ -95,8 +106,8 @@ Peer review
 Anyone may participate in peer review which is expressed by comments in the pull
 request. Typically reviewers will review the code for obvious errors, as well as
 test out the patch set and opine on the technical merits of the patch. PRs should
-be reviewed first on the conceptual level before focusing on code style or grammar
-fixes.
+be reviewed first on the conceptual level before focusing on code style or
+grammar fixes.
 
 Coding Conventions
 ------------------
@@ -104,65 +115,70 @@ Coding Conventions
 Use tabs. If you want to align lines, use spaces. Any desired alignment should
 display fine at any tab-length display setting.
 
-Our CI enforces [clippy's](https://github.com/rust-lang/rust-clippy) default linting
-[settings](https://rust-lang.github.io/rust-clippy/rust-1.39.0/index.html).
-This includes all lint groups except for nursery, pedantic, and cargo in addition to allowing the following lints:
-`erasing_op`, `never_loop`, `if_same_then_else`.
+Our CI enforces [clippy's](https://github.com/rust-lang/rust-clippy) default
+linting
+[settings](https://rust-lang.github.io/rust-clippy/rust-1.39.0/index.html). This
+includes all lint groups except for nursery, pedantic, and cargo in addition to
+allowing the following lints: `erasing_op`, `never_loop`, `if_same_then_else`.
 
-If you use rustup, feel free to lint locally, otherwise you can just push to CI for automated linting.
+If you use rustup, feel free to lint locally; otherwise, you can just push to CI
+for automated linting.
 
 ```bash
 rustup component add clippy
 cargo clippy
 ```
 
-Significant structures that users persist should always have their serialization methods (usually
-`Writeable::write` and `ReadableArgs::read`) begin with
+Significant structures that users persist should always have their serialization
+methods (usually `Writeable::write` and `ReadableArgs::read`) begin with
 `write_ver_prefix!()`/`read_ver_prefix!()` calls, and end with calls to
 `write_tlv_fields!()`/`read_tlv_fields!()`.
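
For contributors adding such a structure inside the `lightning` crate, the intended shape looks roughly like the sketch below. This is illustrative only: `MyStruct`, `some_field`, and the version constants are placeholders, and the in-crate `Writeable`/`Readable` traits and ser macros are assumed to be in scope.

```rust
impl Writeable for MyStruct {
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		// Version prefix first...
		write_ver_prefix!(w, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
		// ...then any fixed fields...
		self.some_field.write(w)?;
		// ...and TLV fields last, so future versions can append new ones.
		write_tlv_fields!(w, {});
		Ok(())
	}
}

impl Readable for MyStruct {
	fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
		let _ver = read_ver_prefix!(r, SERIALIZATION_VERSION);
		let some_field = Readable::read(r)?;
		read_tlv_fields!(r, {});
		Ok(MyStruct { some_field })
	}
}
```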
 
-Updates to the serialized format which has implications for backwards or forwards compatibility
-must be included in release notes.
+Updates to the serialized format which have implications for backwards or
+forwards compatibility must be included in release notes.
 
 Security
 --------
 
-Security is the primary focus of Rust-Lightning; disclosure of security vulnerabilites
-helps prevent user loss of funds. If you believe a vulnerability may affect other Lightning
-implementations, please inform them.
+Security is the primary focus of `rust-lightning`; disclosure of security
+vulnerabilities helps prevent user loss of funds. If you believe a vulnerability
+may affect other Lightning implementations, please inform them.
 
-Note that Rust-Lightning is currently considered "pre-production" during this time, there
-is no special handling of security issues. Please simply open an issue on Github.
+You can find further information on submitting (possible) vulnerabilities in the
+[security policy](SECURITY.md).
 
 Testing
 -------
 
-Related to the security aspect, Rust-Lightning developers take testing
-very seriously. Due to the modular nature of the project, writing new functional
-tests is easy and good test coverage of the codebase is an important goal. Refactoring
-the project to enable fine-grained unit testing is also an ongoing effort.
+Related to the security aspect, `rust-lightning` developers take testing very
+seriously. Due to the modular nature of the project, writing new functional
+tests is easy and good test coverage of the codebase is an important goal.
+Refactoring the project to enable fine-grained unit testing is also an ongoing
+effort.
 
 Fuzzing is heavily encouraged: you will find all related material under `fuzz/`.
 
-Mutation testing is work-in-progress; any contribution there would be warmly welcomed.
+Mutation testing is work-in-progress; any contribution there would be warmly
+welcomed.
 
 C/C++ Bindings
 --------------
 
-You can learn more about the C/C++ bindings that are made available by reading the
-[C/C++ Bindings README](lightning-c-bindings/README.md). If you are not using the C/C++ bindings,
-you likely don't need to worry about them, and during their early experimental phase we are not
-requiring that pull requests keep the bindings up to date (and, thus, pass the bindings_check CI
-run). If you wish to ensure your PR passes the bindings generation phase, you should run the
-`genbindings.sh` script in the top of the directory tree to generate, build, and test C bindings on
-your local system.
+You can learn more about the C/C++ bindings that are made available by reading
+the [C/C++ Bindings README](https://github.com/lightningdevkit/ldk-c-bindings/blob/main/lightning-c-bindings/README.md).
+If you are not using the C/C++ bindings, you likely don't need to worry about
+them, and during their early experimental phase we are not requiring that pull
+requests keep the bindings up to date (and, thus, pass the `bindings_check` CI
+run). If you wish to ensure your PR passes the bindings generation phase, you
+should run the `genbindings.sh` script in the top of the directory tree to
+generate, build, and test C bindings on your local system.
 
 Going further
 -------------
 
-You may be interested by Jon Atack guide on [How to review Bitcoin Core PRs](https://github.com/jonatack/bitcoin-development/blob/master/how-to-review-bitcoin-core-prs.md)
+You may be interested in Jon Atack's guide on [How to review Bitcoin Core PRs](https://github.com/jonatack/bitcoin-development/blob/master/how-to-review-bitcoin-core-prs.md)
 and [How to make Bitcoin Core PRs](https://github.com/jonatack/bitcoin-development/blob/master/how-to-make-bitcoin-core-prs.md).
-While there are differences between the projects in terms of context and maturity, many
-of the suggestions offered apply to this project.
+While there are differences between the projects in terms of context and
+maturity, many of the suggestions offered apply to this project.
 
 Overall, have fun :)
index 3c34335c244af9d04734f2be010d424b06c498fd..fea9c35dc0ec3282be2545b43b36af62535de986 100644 (file)
--- a/README.md
+++ b/README.md
@@ -5,74 +5,73 @@ Rust-Lightning
 [![Documentation](https://img.shields.io/static/v1?logo=read-the-docs&label=docs.rs&message=lightning&color=informational)](https://docs.rs/lightning/)
 [![Safety Dance](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/)
 
-Rust-Lightning is a Bitcoin Lightning library written in Rust. The main crate,
+`rust-lightning` is a Bitcoin Lightning library written in Rust. The main crate,
 `lightning`, does not handle networking, persistence, or any other I/O. Thus,
 it is runtime-agnostic, but users must implement basic networking logic, chain
 interactions, and disk storage. More information is available in the `About`
 section.
 
-The `lightning-net-tokio` crate implements Lightning networking using the
-[Tokio](https://github.com/tokio-rs/tokio) async runtime.
-
-The `lightning-persister` crate implements persistence for channel data that
-is crucial to avoiding loss of channel funds. Sample modules for persistence of
-other Rust-Lightning data is coming soon.
-
 Status
 ------
-
-The project implements all of the BOLT specifications in the 1.0 spec. The
+The project implements all of the [BOLT
+specifications](https://github.com/lightning/bolts). The
 implementation has pretty good test coverage that is expected to continue to
 improve. It is also anticipated that as developers begin using the API, the
 lessons from that will result in changes to the API, so any developer using this
 API at this stage should be prepared to embrace that. The current state is
-sufficient for a developer or project to experiment with it. Recent increased
-contribution rate to the project is expected to lead to a high quality, stable,
-production-worthy implementation in 2021.
+sufficient for a developer or project to experiment with it.
 
-Communications for Rust-Lightning and Lightning Development Kit happens through our LDK
-[slack](https://join.slack.com/t/lightningdevkit/shared_invite/zt-tte36cb7-r5f41MDn3ObFtDu~N9dCrQ) & [discord](https://discord.gg/5AcknnMfBw) channels.
+Communications for `rust-lightning` and Lightning Development Kit happen through
+our LDK [Discord](https://discord.gg/5AcknnMfBw) channels.
 
 Crates
 -----------
-1. [lightning](./lightning)   
-  The Core of the LDK library, implements the lightning protocol, channel state machine, 
-  and on-chain logic. Supports no-std and exposes on relatively low-level interfaces.
+1. [lightning](./lightning)
+  The core of the LDK library, implements the Lightning protocol, channel state machine,
+  and on-chain logic. Supports `no-std` and exposes only relatively low-level interfaces.
 2. [lightning-background-processor](./lightning-background-processor)
   Utilities to perform required background tasks for Rust Lightning.
 3. [lightning-block-sync](./lightning-block-sync)
   Utilities to fetch the chain data from a block source and feed them into Rust Lightning.
 4. [lightning-invoice](./lightning-invoice)
-  Data structures to parse and serialize BOLT11 lightning invoices.
+  Data structures to parse and serialize
+  [BOLT #11](https://github.com/lightning/bolts/blob/master/11-payment-encoding.md)
+  Lightning invoices.
 5. [lightning-net-tokio](./lightning-net-tokio)
-  Implementation of the rust-lightning network stack using Tokio.
-  For Rust-Lightning clients which wish to make direct connections to Lightning P2P nodes,
-  this is a simple alternative to implementing the required network stack, especially for those already using Tokio.
+  Implementation of the `rust-lightning` network stack using the
+  [Tokio](https://github.com/tokio-rs/tokio) `async` runtime. For `rust-lightning`
+  clients which wish to make direct connections to Lightning P2P nodes, this is
+  a simple alternative to implementing the required network stack, especially
+  for those already using Tokio.
 6. [lightning-persister](./lightning-persister)
-  Utilities to manage Rust-Lightning channel data persistence and retrieval.
+  Implements utilities to manage `rust-lightning` channel data persistence and retrieval.
+  Persisting channel data is crucial to avoiding loss of channel funds.
 7. [lightning-rapid-gossip-sync](./lightning-rapid-gossip-sync)
   Client for rapid gossip graph syncing, aimed primarily at mobile clients.
 
 About
 -----------
-LDK/Rust-Lightning is a generic library which allows you to build a lightning
-node without needing to worry about getting all of the lightning state machine,
+LDK/`rust-lightning` is a generic library which allows you to build a Lightning
+node without needing to worry about getting all of the Lightning state machine,
 routing, and on-chain punishment code (and other chain interactions) exactly
-correct. Note that Rust-Lightning isn't, in itself, a node. There are various
+correct. Note that `rust-lightning` isn't, in itself, a node. There are various
 working/in progress demos which could be used as a node today, but if you "just"
-want a generic lightning node, you're almost certainly better off with
-`c-lightning`/`lnd` - if, on the other hand, you want to integrate lightning
-with custom features such as your own chain sync, your own key management, your
-own data storage/backup logic, etc., LDK is likely your only option. Some
-Rust-Lightning utilities such as those in `chan_utils` are also suitable for use
-in non-LN Bitcoin applications such as DLCs and bulletin boards.
-
-We are currently working on a demo node which fetches blockchain data and
-on-chain funds via Bitcoin Core RPC/REST. The individual pieces of that demo
-are/will be composable, so you can pick the off-the-shelf parts you want and
-replace the rest.
-
-In general, Rust-Lightning does not provide (but LDK has implementations of):
+want a generic Lightning node, you're almost certainly better off with [Core
+Lightning](https://github.com/ElementsProject/lightning) or
+[LND](https://github.com/lightningnetwork/lnd). If, on the other hand, you want
+to integrate Lightning with custom features such as your own chain sync, your
+own key management, your own data storage/backup logic, etc., LDK is likely your
+only option. Some `rust-lightning` utilities such as those in
+[`chan_utils`](./lightning/src/ln/chan_utils.rs) are also suitable for use in
+non-LN Bitcoin applications such as Discreet Log Contracts (DLCs) and bulletin boards.
+
+A sample node which fetches blockchain data and manages on-chain funds via the
+Bitcoin Core RPC/REST interface is available
+[here](https://github.com/lightningdevkit/ldk-sample/). The individual pieces of
+that demo are composable, so you can pick the off-the-shelf parts you want
+and replace the rest.
+
+In general, `rust-lightning` does not provide (but LDK has implementations of):
 * on-disk storage - you can store the channel state any way you want - whether
   Google Drive/iCloud, a local disk, any key-value store/database/a remote
   server, or any combination of them - we provide a clean API that provides
@@ -84,21 +83,21 @@ In general, Rust-Lightning does not provide (but LDK has implementations of):
   informed of, which is compatible with Electrum server requests/neutrino
   filtering/etc.
 * UTXO management - RL/LDK owns on-chain funds as long as they are claimable as
-  a part of a lightning output which can be contested - once a channel is closed
+  part of a Lightning output which can be contested - once a channel is closed
   and all on-chain outputs are spendable only by the user, we provide users
   notifications that a UTXO is "theirs" again and it is up to them to spend it
   as they wish. Additionally, channel funding is accomplished with a generic API
   which notifies users of the output which needs to appear on-chain, which they
   can then create a transaction for. Once a transaction is created, we handle
   the rest. This is a large part of our API's goals - making it easier to
-  integrate lightning into existing on-chain wallets which have their own
+  integrate Lightning into existing on-chain wallets which have their own
   on-chain logic - without needing to move funds in and out of a separate
-  lightning wallet with on-chain transactions and a separate private key system.
-* networking - to enable a user to run a full lightning node on an embedded
+  Lightning wallet with on-chain transactions and a separate private key system.
+* networking - to enable a user to run a full Lightning node on an embedded
   machine, we don't specify exactly how to connect to another node at all! We
   provide a default implementation which uses TCP sockets, but, e.g., if you
-  wanted to run your full lightning node on a hardware wallet, you could, by
-  piping the lightning network messages over USB/serial and then sending them in
+  wanted to run your full Lightning node on a hardware wallet, you could, by
+  piping the Lightning network messages over USB/serial and then sending them in
   a TCP socket from another machine.
 * private keys - again we have "default implementations", but users can choose to
   provide private keys to RL/LDK in any way they wish following a simple API. We
@@ -111,25 +110,24 @@ https://vimeo.com/showcase/8372504/video/412818125
 
 Design Goal
 -----------
-
-The goal is to provide a full-featured but also incredibly flexible lightning
+The goal is to provide a full-featured but also incredibly flexible Lightning
 implementation, allowing the user to decide how they wish to use it. With that
 in mind, everything should be exposed via simple, composable APIs. More
-information about Rust-Lightning's flexibility is provided in the `About`
+information about `rust-lightning`'s flexibility is provided in the `About`
 section above.
 
 For security reasons, do not add new dependencies. Really do not add new
 non-optional/non-test/non-library dependencies. Really really do not add
 dependencies with dependencies. Do convince Andrew to cut down dependency usage
-in rust-bitcoin.
+in `rust-bitcoin`.
 
 Rust-Lightning vs. LDK (Lightning Development Kit)
 -------------
-Rust-Lightning refers to the core `lightning` crate within this repo, whereas
-LDK encompasses Rust-Lightning and all of its sample modules and crates (e.g.
+`rust-lightning` refers to the core `lightning` crate within this repo, whereas
+LDK encompasses `rust-lightning` and all of its sample modules and crates (e.g.
 the `lightning-persister` crate), language bindings, sample node
-implementation(s), and other tools built around using Rust-Lightning for
-lightning integration or building a lightning node.
+implementation(s), and other tools built around using `rust-lightning` for
+Lightning integration or building a Lightning node.
 
 Tagline
 -------
@@ -144,7 +142,7 @@ Contributors are warmly welcome, see [CONTRIBUTING.md](CONTRIBUTING.md).
 Project Architecture
 ---------------------
 
-For a Rust-Lightning high-level API introduction, see [ARCH.md](ARCH.md).
+For a `rust-lightning` high-level API introduction, see [ARCH.md](ARCH.md).
 
 License is either Apache-2.0 or MIT, at the option of the user (ie dual-license
 Apache-2.0 and MIT).
index 9d5ccaa8105e954a541ef1e19fa83ff26c148b83..c11e1baf61712a89ec91a3fbdb0b993b75c8f109 100644 (file)
@@ -54,6 +54,7 @@ use utils::test_logger::{self, Output};
 use utils::test_persister::TestPersister;
 
 use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 use bitcoin::secp256k1::Secp256k1;
 
@@ -140,7 +141,7 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
                };
                let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
                        read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
-               deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
+               deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
                let mut ser = VecWriter(Vec::new());
                deserialized_monitor.write(&mut ser).unwrap();
                map_entry.insert((update.update_id, ser.0));
@@ -148,7 +149,7 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
                self.chain_monitor.update_channel(funding_txo, update)
        }
 
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
 }
@@ -165,6 +166,14 @@ impl KeysInterface for KeyProvider {
                Ok(SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap())
        }
 
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+               let mut node_secret = self.get_node_secret(recipient)?;
+               if let Some(tweak) = tweak {
+                       node_secret.mul_assign(tweak).map_err(|_| ())?;
+               }
+               Ok(SharedSecret::new(other_key, &node_secret))
+       }
+
        fn get_inbound_payment_key_material(&self) -> KeyMaterial {
                KeyMaterial([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id])
        }
@@ -860,6 +869,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                                events::Event::PendingHTLCsForwardable { .. } => {
                                                        nodes[$node].process_pending_htlc_forwards();
                                                },
+                                               events::Event::HTLCHandlingFailed { .. } => {},
                                                _ => if out.may_fail.load(atomic::Ordering::Acquire) {
                                                        return;
                                                } else {
index fa0211aa280bc0794fa37f67d53332b1793d3129..f0f8054624d5084fca0fb69f4d3e46596aee5b52 100644 (file)
@@ -51,6 +51,7 @@ use utils::test_logger;
 use utils::test_persister::TestPersister;
 
 use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 use bitcoin::secp256k1::Secp256k1;
 
@@ -269,6 +270,14 @@ impl KeysInterface for KeyProvider {
                Ok(self.node_secret.clone())
        }
 
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+               let mut node_secret = self.get_node_secret(recipient)?;
+               if let Some(tweak) = tweak {
+                       node_secret.mul_assign(tweak).map_err(|_| ())?;
+               }
+               Ok(SharedSecret::new(other_key, &node_secret))
+       }
+
        fn get_inbound_payment_key_material(&self) -> KeyMaterial {
                self.inbound_payment_key.clone()
        }
index 7b2a26f7b71e9ef6a35c59007dbfd872b6882114..2df83f2b75a1e2f646ffdf3abf62b60cac6304b3 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-background-processor"
-version = "0.0.109"
+version = "0.0.110"
 authors = ["Valentine Wallace <vwallace@protonmail.com>"]
 license = "MIT OR Apache-2.0"
 repository = "http://github.com/lightningdevkit/rust-lightning"
@@ -15,10 +15,10 @@ rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
 bitcoin = "0.28.1"
-lightning = { version = "0.0.109", path = "../lightning", features = ["std"] }
-lightning-rapid-gossip-sync = { version = "0.0.109", path = "../lightning-rapid-gossip-sync" }
+lightning = { version = "0.0.110", path = "../lightning", features = ["std"] }
+lightning-rapid-gossip-sync = { version = "0.0.110", path = "../lightning-rapid-gossip-sync" }
 
 [dev-dependencies]
-lightning = { version = "0.0.109", path = "../lightning", features = ["_test_utils"] }
-lightning-invoice = { version = "0.17.0", path = "../lightning-invoice" }
-lightning-persister = { version = "0.0.109", path = "../lightning-persister" }
+lightning = { version = "0.0.110", path = "../lightning", features = ["_test_utils"] }
+lightning-invoice = { version = "0.18.0", path = "../lightning-invoice" }
+lightning-persister = { version = "0.0.110", path = "../lightning-persister" }
index 11dca44bbab3d7b5c91585835ddc02ccf4baa393..484439b3907b364dddc3dc2c13bb6c078be2ad0c 100644 (file)
@@ -137,6 +137,55 @@ where A::Target: chain::Access, L::Target: Logger {
        }
 }
 
+/// (C-not exported) as the bindings concretize everything and have constructors for us
+impl<P: Deref<Target = P2PGossipSync<G, A, L>>, G: Deref<Target = NetworkGraph<L>>, A: Deref, L: Deref>
+       GossipSync<P, &RapidGossipSync<G, L>, G, A, L>
+where
+       A::Target: chain::Access,
+       L::Target: Logger,
+{
+       /// Initializes a new [`GossipSync::P2P`] variant.
+       pub fn p2p(gossip_sync: P) -> Self {
+               GossipSync::P2P(gossip_sync)
+       }
+}
+
+/// (C-not exported) as the bindings concretize everything and have constructors for us
+impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
+       GossipSync<
+               &P2PGossipSync<G, &'a (dyn chain::Access + Send + Sync), L>,
+               R,
+               G,
+               &'a (dyn chain::Access + Send + Sync),
+               L,
+       >
+where
+       L::Target: Logger,
+{
+       /// Initializes a new [`GossipSync::Rapid`] variant.
+       pub fn rapid(gossip_sync: R) -> Self {
+               GossipSync::Rapid(gossip_sync)
+       }
+}
+
+/// (C-not exported) as the bindings concretize everything and have constructors for us
+impl<'a, L: Deref>
+       GossipSync<
+               &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn chain::Access + Send + Sync), L>,
+               &RapidGossipSync<&'a NetworkGraph<L>, L>,
+               &'a NetworkGraph<L>,
+               &'a (dyn chain::Access + Send + Sync),
+               L,
+       >
+where
+       L::Target: Logger,
+{
+       /// Initializes a new [`GossipSync::None`] variant.
+       pub fn none() -> Self {
+               GossipSync::None
+       }
+}
+
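
For context, a hedged usage sketch of these constructors follows. The `NetworkGraph::new` and `P2PGossipSync::new` signatures are assumed from the wider 0.0.110 API (they are not part of this diff), and the logger stub exists only to keep the example self-contained.

```rust
use std::sync::Arc;

use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
use lightning::util::logger::{Logger, Record};
use lightning_background_processor::GossipSync;

// Minimal logger so the sketch stands on its own.
struct PrintLogger;
impl Logger for PrintLogger {
	fn log(&self, record: &Record) { println!("{}", record.args); }
}

fn main() {
	let logger = Arc::new(PrintLogger);
	let genesis = genesis_block(Network::Testnet).header.block_hash();
	let network_graph = Arc::new(NetworkGraph::new(genesis, Arc::clone(&logger)));
	let p2p_sync = Arc::new(P2PGossipSync::new(
		Arc::clone(&network_graph),
		None::<Arc<dyn lightning::chain::Access + Send + Sync>>,
		Arc::clone(&logger),
	));

	// Wrap the P2P gossip sync for use with `BackgroundProcessor::start`:
	let _gossip_sync = GossipSync::p2p(Arc::clone(&p2p_sync));

	// Alternatively, `GossipSync::rapid(..)` wraps a `RapidGossipSync`, and
	// `GossipSync::none()` opts out of gossip handling entirely.
}
```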
 /// Decorates an [`EventHandler`] with common functionality provided by standard [`EventHandler`]s.
 struct DecoratingEventHandler<
        'a,
index d24953f50df36e8d03a02fbd1876a1063844b941..c6650208e26023f7c693bb5aa723439dacb91cb3 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-block-sync"
-version = "0.0.109"
+version = "0.0.110"
 authors = ["Jeffrey Czyz", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "http://github.com/lightningdevkit/rust-lightning"
@@ -19,7 +19,7 @@ rpc-client = [ "serde", "serde_json", "chunked_transfer" ]
 
 [dependencies]
 bitcoin = "0.28.1"
-lightning = { version = "0.0.109", path = "../lightning" }
+lightning = { version = "0.0.110", path = "../lightning" }
 futures = { version = "0.3" }
 tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
 serde = { version = "1.0", features = ["derive"], optional = true }
index 6e30d2e86d20d268529bd37d942492db810d84b6..4c6cb0c0600725e9cf3552045865e8b5be472199 100644 (file)
@@ -59,12 +59,11 @@ impl Validate for BlockHeaderData {
        type T = ValidatedBlockHeader;
 
        fn validate(self, block_hash: BlockHash) -> BlockSourceResult<Self::T> {
-               self.header
+               let pow_valid_block_hash = self.header
                        .validate_pow(&self.header.target())
                        .or_else(|e| Err(BlockSourceError::persistent(e)))?;
 
-               // TODO: Use the result of validate_pow instead of recomputing the block hash once upstream.
-               if self.header.block_hash() != block_hash {
+               if pow_valid_block_hash != block_hash {
                        return Err(BlockSourceError::persistent("invalid block hash"));
                }
 
@@ -76,12 +75,11 @@ impl Validate for Block {
        type T = ValidatedBlock;
 
        fn validate(self, block_hash: BlockHash) -> BlockSourceResult<Self::T> {
-               self.header
+               let pow_valid_block_hash = self.header
                        .validate_pow(&self.header.target())
                        .or_else(|e| Err(BlockSourceError::persistent(e)))?;
 
-               // TODO: Use the result of validate_pow instead of recomputing the block hash once upstream.
-               if self.block_hash() != block_hash {
+               if pow_valid_block_hash != block_hash {
                        return Err(BlockSourceError::persistent("invalid block hash"));
                }
 
index cae1d9f94d314a39711a7acad46ca125ca9d39de..cc0e68cfc422b1bd098b2b985509b6b8cd1bb710 100644 (file)
@@ -1,7 +1,7 @@
 [package]
 name = "lightning-invoice"
 description = "Data structures to parse and serialize BOLT11 lightning invoices"
-version = "0.17.0"
+version = "0.18.0"
 authors = ["Sebastian Geisler <sgeisler@wh2.tu-dresden.de>"]
 documentation = "https://docs.rs/lightning-invoice/"
 license = "MIT OR Apache-2.0"
@@ -20,7 +20,7 @@ std = ["bitcoin_hashes/std", "num-traits/std", "lightning/std", "bech32/std"]
 
 [dependencies]
 bech32 = { version = "0.8", default-features = false }
-lightning = { version = "0.0.109", path = "../lightning", default-features = false }
+lightning = { version = "0.0.110", path = "../lightning", default-features = false }
 secp256k1 = { version = "0.22", default-features = false, features = ["recovery", "alloc"] }
 num-traits = { version = "0.2.8", default-features = false }
 bitcoin_hashes = { version = "0.10", default-features = false }
@@ -29,6 +29,6 @@ core2 = { version = "0.3.0", default-features = false, optional = true }
 serde = { version = "1.0.118", optional = true }
 
 [dev-dependencies]
-lightning = { version = "0.0.109", path = "../lightning", default-features = false, features = ["_test_utils"] }
+lightning = { version = "0.0.110", path = "../lightning", default-features = false, features = ["_test_utils"] }
 hex = "0.4"
 serde_json = { version = "1"}
index 051893ea0a3a5c9ce0405045e828644928f5c8d0..476d12a34751e885c43980453ede2db9288d0cd5 100644 (file)
@@ -1437,8 +1437,6 @@ mod tests {
        enum TestResult {
                PaymentFailure { path: Vec<RouteHop>, short_channel_id: u64 },
                PaymentSuccess { path: Vec<RouteHop> },
-               ProbeFailure { path: Vec<RouteHop>, short_channel_id: u64 },
-               ProbeSuccess { path: Vec<RouteHop> },
        }
 
        impl TestScorer {
@@ -1474,12 +1472,6 @@ mod tests {
                                        Some(TestResult::PaymentSuccess { path }) => {
                                                panic!("Unexpected successful payment path: {:?}", path)
                                        },
-                                       Some(TestResult::ProbeFailure { path, .. }) => {
-                                               panic!("Unexpected failed payment probe: {:?}", path)
-                                       },
-                                       Some(TestResult::ProbeSuccess { path }) => {
-                                               panic!("Unexpected successful payment probe: {:?}", path)
-                                       },
                                        None => panic!("Unexpected payment_path_failed call: {:?}", actual_path),
                                }
                        }
@@ -1494,18 +1486,12 @@ mod tests {
                                        Some(TestResult::PaymentSuccess { path }) => {
                                                assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
                                        },
-                                       Some(TestResult::ProbeFailure { path, .. }) => {
-                                               panic!("Unexpected failed payment probe: {:?}", path)
-                                       },
-                                       Some(TestResult::ProbeSuccess { path }) => {
-                                               panic!("Unexpected successful payment probe: {:?}", path)
-                                       },
                                        None => panic!("Unexpected payment_path_successful call: {:?}", actual_path),
                                }
                        }
                }
 
-               fn probe_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) {
+               fn probe_failed(&mut self, actual_path: &[&RouteHop], _: u64) {
                        if let Some(expectations) = &mut self.expectations {
                                match expectations.pop_front() {
                                        Some(TestResult::PaymentFailure { path, .. }) => {
@@ -1514,13 +1500,6 @@ mod tests {
                                        Some(TestResult::PaymentSuccess { path }) => {
                                                panic!("Unexpected successful payment path: {:?}", path)
                                        },
-                                       Some(TestResult::ProbeFailure { path, short_channel_id }) => {
-                                               assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
-                                               assert_eq!(actual_short_channel_id, short_channel_id);
-                                       },
-                                       Some(TestResult::ProbeSuccess { path }) => {
-                                               panic!("Unexpected successful payment probe: {:?}", path)
-                                       },
                                        None => panic!("Unexpected payment_path_failed call: {:?}", actual_path),
                                }
                        }
@@ -1534,12 +1513,6 @@ mod tests {
                                        Some(TestResult::PaymentSuccess { path }) => {
                                                panic!("Unexpected successful payment path: {:?}", path)
                                        },
-                                       Some(TestResult::ProbeFailure { path, .. }) => {
-                                               panic!("Unexpected failed payment probe: {:?}", path)
-                                       },
-                                       Some(TestResult::ProbeSuccess { path }) => {
-                                               assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
-                                       },
                                        None => panic!("Unexpected payment_path_successful call: {:?}", actual_path),
                                }
                        }
index 050ad4d0121b5b5265876a5488625e0422cd9bff..dd7770cbe8cdd65145e5ce663c45f2143754835e 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-net-tokio"
-version = "0.0.109"
+version = "0.0.110"
 authors = ["Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning/"
@@ -16,7 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
 bitcoin = "0.28.1"
-lightning = { version = "0.0.109", path = "../lightning" }
+lightning = { version = "0.0.110", path = "../lightning" }
 tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
index 2ac10762b04eaf23f602bc3d8dcc987438542cae..645a7434e4536534138c5dd6150fe52bad55ff0c 100644 (file)
@@ -84,6 +84,7 @@ use lightning::ln::peer_handler::CustomMessageHandler;
 use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
 use lightning::util::logger::Logger;
 
+use std::ops::Deref;
 use std::task;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
@@ -120,11 +121,16 @@ struct Connection {
        id: u64,
 }
 impl Connection {
-       async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, mut event_receiver: mpsc::Receiver<()>) where
-                       CMH: ChannelMessageHandler + 'static + Send + Sync,
-                       RMH: RoutingMessageHandler + 'static + Send + Sync,
-                       L: Logger + 'static + ?Sized + Send + Sync,
-                       UMH: CustomMessageHandler + 'static + Send + Sync {
+       async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, mut event_receiver: mpsc::Receiver<()>) where
+                       CMH: Deref + 'static + Send + Sync,
+                       RMH: Deref + 'static + Send + Sync,
+                       L: Deref + 'static + Send + Sync,
+                       UMH: Deref + 'static + Send + Sync,
+                       CMH::Target: ChannelMessageHandler + Send + Sync,
+                       RMH::Target: RoutingMessageHandler + Send + Sync,
+                       L::Target: Logger + Send + Sync,
+                       UMH::Target: CustomMessageHandler + Send + Sync,
+    {
                loop {
                        if event_receiver.recv().await.is_none() {
                                return;
@@ -133,11 +139,16 @@ impl Connection {
                }
        }
 
-       async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
-                       CMH: ChannelMessageHandler + 'static + Send + Sync,
-                       RMH: RoutingMessageHandler + 'static + Send + Sync,
-                       L: Logger + 'static + ?Sized + Send + Sync,
-                       UMH: CustomMessageHandler + 'static + Send + Sync {
+       async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+                       CMH: Deref + 'static + Send + Sync,
+                       RMH: Deref + 'static + Send + Sync,
+                       L: Deref + 'static + Send + Sync,
+                       UMH: Deref + 'static + Send + Sync,
+                       CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
+                       RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
+                       L::Target: Logger + 'static + Send + Sync,
+                       UMH::Target: CustomMessageHandler + 'static + Send + Sync,
+        {
                // Create a waker to wake up poll_event_process, above
                let (event_waker, event_receiver) = mpsc::channel(1);
                tokio::spawn(Self::poll_event_process(Arc::clone(&peer_manager), event_receiver));
@@ -255,11 +266,16 @@ fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
-               CMH: ChannelMessageHandler + 'static + Send + Sync,
-               RMH: RoutingMessageHandler + 'static + Send + Sync,
-               L: Logger + 'static + ?Sized + Send + Sync,
-               UMH: CustomMessageHandler + 'static + Send + Sync {
+pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+               CMH: Deref + 'static + Send + Sync,
+               RMH: Deref + 'static + Send + Sync,
+               L: Deref + 'static + Send + Sync,
+               UMH: Deref + 'static + Send + Sync,
+               CMH::Target: ChannelMessageHandler + Send + Sync,
+               RMH::Target: RoutingMessageHandler + Send + Sync,
+               L::Target: Logger + Send + Sync,
+               UMH::Target: CustomMessageHandler + Send + Sync,
+{
        let remote_addr = get_addr_from_stream(&stream);
        let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
@@ -297,11 +313,16 @@ pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManag
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
-               CMH: ChannelMessageHandler + 'static + Send + Sync,
-               RMH: RoutingMessageHandler + 'static + Send + Sync,
-               L: Logger + 'static + ?Sized + Send + Sync,
-               UMH: CustomMessageHandler + 'static + Send + Sync {
+pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+               CMH: Deref + 'static + Send + Sync,
+               RMH: Deref + 'static + Send + Sync,
+               L: Deref + 'static + Send + Sync,
+               UMH: Deref + 'static + Send + Sync,
+               CMH::Target: ChannelMessageHandler + Send + Sync,
+               RMH::Target: RoutingMessageHandler + Send + Sync,
+               L::Target: Logger + Send + Sync,
+               UMH::Target: CustomMessageHandler + Send + Sync,
+{
        let remote_addr = get_addr_from_stream(&stream);
        let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
@@ -368,11 +389,16 @@ pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerMana
 /// disconnected and associated handling futures are freed, though, because all processing in said
 /// futures are spawned with tokio::spawn, you do not need to poll the second future in order to
 /// make progress.
-pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
-               CMH: ChannelMessageHandler + 'static + Send + Sync,
-               RMH: RoutingMessageHandler + 'static + Send + Sync,
-               L: Logger + 'static + ?Sized + Send + Sync,
-               UMH: CustomMessageHandler + 'static + Send + Sync {
+pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
+               CMH: Deref + 'static + Send + Sync,
+               RMH: Deref + 'static + Send + Sync,
+               L: Deref + 'static + Send + Sync,
+               UMH: Deref + 'static + Send + Sync,
+               CMH::Target: ChannelMessageHandler + Send + Sync,
+               RMH::Target: RoutingMessageHandler + Send + Sync,
+               L::Target: Logger + Send + Sync,
+               UMH::Target: CustomMessageHandler + Send + Sync,
+{
        if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
                Some(setup_outbound(peer_manager, their_node_id, stream))
        } else { None }
index c13dcb169d4e51ef8cceba3baabaef2b26584caf..7de0ddc153c807c9a97016dfb6f1feaa4b1a5fdb 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-persister"
-version = "0.0.109"
+version = "0.0.110"
 authors = ["Valentine Wallace", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning/"
@@ -17,11 +17,11 @@ _bench_unstable = ["lightning/_bench_unstable"]
 
 [dependencies]
 bitcoin = "0.28.1"
-lightning = { version = "0.0.109", path = "../lightning" }
+lightning = { version = "0.0.110", path = "../lightning" }
 libc = "0.2"
 
 [target.'cfg(windows)'.dependencies]
 winapi = { version = "0.3", features = ["winbase"] }
 
 [dev-dependencies]
-lightning = { version = "0.0.109", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.110", path = "../lightning", features = ["_test_utils"] }
index 568e399fde43521d4cfdc520c0919c602716b2a9..39518bbbebd672ac6f3960288d48e8118be05ac2 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-rapid-gossip-sync"
-version = "0.0.109"
+version = "0.0.110"
 authors = ["Arik Sosman <git@arik.io>"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
@@ -13,8 +13,8 @@ Utility to process gossip routing data from Rapid Gossip Sync Server.
 _bench_unstable = []
 
 [dependencies]
-lightning = { version = "0.0.109", path = "../lightning" }
+lightning = { version = "0.0.110", path = "../lightning" }
 bitcoin = { version = "0.28.1", default-features = false }
 
 [dev-dependencies]
-lightning = { version = "0.0.109", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.110", path = "../lightning", features = ["_test_utils"] }
index 7880e11a60ea45c824dfceb3e75d6ee9d48610c8..6e9280f86a3f85909c8c25e49fc254bcc3a06819 100644 (file)
@@ -240,7 +240,7 @@ mod tests {
                let sync_result = rapid_sync
                        .sync_network_graph_with_file_path("./res/full_graph.lngossip");
                if let Err(crate::error::GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
-                       let error_string = format!("Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin\n\n{:?}", io_error);
+                       let error_string = format!("Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-285cb27df79-2022-07-21.bin\n\n{:?}", io_error);
                        #[cfg(not(require_route_graph_test))]
                        {
                                println!("{}", error_string);
index 09693a76d6f3b4868424d863ddd97399802974f9..818e19fe4b42fa6f12a31597b8ca4798eee2b5cb 100644 (file)
@@ -8,7 +8,7 @@ use bitcoin::BlockHash;
 use bitcoin::secp256k1::PublicKey;
 
 use lightning::ln::msgs::{
-       DecodeError, ErrorAction, LightningError, OptionalField, UnsignedChannelUpdate,
+       DecodeError, ErrorAction, LightningError, UnsignedChannelUpdate,
 };
 use lightning::routing::gossip::NetworkGraph;
 use lightning::util::logger::Logger;
@@ -119,12 +119,7 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
                let default_htlc_minimum_msat: u64 = Readable::read(&mut read_cursor)?;
                let default_fee_base_msat: u32 = Readable::read(&mut read_cursor)?;
                let default_fee_proportional_millionths: u32 = Readable::read(&mut read_cursor)?;
-               let tentative_default_htlc_maximum_msat: u64 = Readable::read(&mut read_cursor)?;
-               let default_htlc_maximum_msat = if tentative_default_htlc_maximum_msat == u64::max_value() {
-                       OptionalField::Absent
-               } else {
-                       OptionalField::Present(tentative_default_htlc_maximum_msat)
-               };
+               let default_htlc_maximum_msat: u64 = Readable::read(&mut read_cursor)?;
 
                for _ in 0..update_count {
                        let scid_delta: BigSize = Readable::read(read_cursor)?;
@@ -147,7 +142,7 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
                                        flags: standard_channel_flags,
                                        cltv_expiry_delta: default_cltv_expiry_delta,
                                        htlc_minimum_msat: default_htlc_minimum_msat,
-                                       htlc_maximum_msat: default_htlc_maximum_msat.clone(),
+                                       htlc_maximum_msat: default_htlc_maximum_msat,
                                        fee_base_msat: default_fee_base_msat,
                                        fee_proportional_millionths: default_fee_proportional_millionths,
                                        excess_data: vec![],
@@ -170,13 +165,6 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
                                                action: ErrorAction::IgnoreError,
                                        })?;
 
-                               let htlc_maximum_msat =
-                                       if let Some(htlc_maximum_msat) = directional_info.htlc_maximum_msat {
-                                               OptionalField::Present(htlc_maximum_msat)
-                                       } else {
-                                               OptionalField::Absent
-                                       };
-
                                UnsignedChannelUpdate {
                                        chain_hash,
                                        short_channel_id,
@@ -184,7 +172,7 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
                                        flags: standard_channel_flags,
                                        cltv_expiry_delta: directional_info.cltv_expiry_delta,
                                        htlc_minimum_msat: directional_info.htlc_minimum_msat,
-                                       htlc_maximum_msat,
+                                       htlc_maximum_msat: directional_info.htlc_maximum_msat,
                                        fee_base_msat: directional_info.fees.base_msat,
                                        fee_proportional_millionths: directional_info.fees.proportional_millionths,
                                        excess_data: vec![],
@@ -212,13 +200,8 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
                        }
 
                        if channel_flags & 0b_0000_0100 > 0 {
-                               let tentative_htlc_maximum_msat: u64 = Readable::read(read_cursor)?;
-                               synthetic_update.htlc_maximum_msat = if tentative_htlc_maximum_msat == u64::max_value()
-                               {
-                                       OptionalField::Absent
-                               } else {
-                                       OptionalField::Present(tentative_htlc_maximum_msat)
-                               };
+                               let htlc_maximum_msat: u64 = Readable::read(read_cursor)?;
+                               synthetic_update.htlc_maximum_msat = htlc_maximum_msat;
                        }
 
                        network_graph.update_channel_unsigned(&synthetic_update)?;
index 3162a0acdb28cb8c38ef73abad161e0dd6f5823c..7980a9b1dc88182c1c7acb2ebd9df40a0afae1ab 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning"
-version = "0.0.109"
+version = "0.0.110"
 authors = ["Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning/"
index 2ec7f318ff57bb5a3e798a0ecd651cc1fb5f4e1d..cbf609485ce1984800fa991e22d952e33ab5db56 100644 (file)
@@ -52,23 +52,18 @@ pub trait FeeEstimator {
        fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32;
 }
 
-// We need `FeeEstimator` implemented so that in some places where we only have a shared
-// reference to a `Deref` to a `FeeEstimator`, we can still wrap it.
-impl<D: Deref> FeeEstimator for D where D::Target: FeeEstimator {
-       fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 {
-               (**self).get_est_sat_per_1000_weight(confirmation_target)
-       }
-}
-
 /// Minimum relay fee as required by bitcoin network mempool policy.
 pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 4000;
 /// Minimum feerate that takes a sane approach to bitcoind weight-to-vbytes rounding.
 /// See the following Core Lightning commit for an explanation:
-/// https://github.com/ElementsProject/lightning/commit/2e687b9b352c9092b5e8bd4a688916ac50b44af0
+/// <https://github.com/ElementsProject/lightning/commit/2e687b9b352c9092b5e8bd4a688916ac50b44af0>
 pub const FEERATE_FLOOR_SATS_PER_KW: u32 = 253;
 
 /// Wraps a `Deref` to a `FeeEstimator` so that any fee estimations provided by it
-/// are bounded below by `FEERATE_FLOOR_SATS_PER_KW` (253 sats/KW)
+/// are bounded below by `FEERATE_FLOOR_SATS_PER_KW` (253 sats/KW).
+///
+/// Note that this does *not* implement [`FeeEstimator`] to make it harder to accidentally mix the
+/// two.
 pub(crate) struct LowerBoundedFeeEstimator<F: Deref>(pub F) where F::Target: FeeEstimator;
 
 impl<F: Deref> LowerBoundedFeeEstimator<F> where F::Target: FeeEstimator {
@@ -76,10 +71,8 @@ impl<F: Deref> LowerBoundedFeeEstimator<F> where F::Target: FeeEstimator {
        pub fn new(fee_estimator: F) -> Self {
                LowerBoundedFeeEstimator(fee_estimator)
        }
-}
 
-impl<F: Deref> FeeEstimator for LowerBoundedFeeEstimator<F> where F::Target: FeeEstimator {
-       fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 {
+       pub fn bounded_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 {
                cmp::max(
                        self.0.get_est_sat_per_1000_weight(confirmation_target),
                        FEERATE_FLOOR_SATS_PER_KW,
@@ -107,7 +100,7 @@ mod tests {
                let test_fee_estimator = &TestFeeEstimator { sat_per_kw };
                let fee_estimator = LowerBoundedFeeEstimator::new(test_fee_estimator);
 
-               assert_eq!(fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background), FEERATE_FLOOR_SATS_PER_KW);
+               assert_eq!(fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background), FEERATE_FLOOR_SATS_PER_KW);
        }
 
        #[test]
@@ -116,6 +109,6 @@ mod tests {
                let test_fee_estimator = &TestFeeEstimator { sat_per_kw };
                let fee_estimator = LowerBoundedFeeEstimator::new(test_fee_estimator);
 
-               assert_eq!(fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background), sat_per_kw);
+               assert_eq!(fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background), sat_per_kw);
        }
 }
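
With `LowerBoundedFeeEstimator` no longer implementing `FeeEstimator`, callers go through
`bounded_sat_per_1000_weight`, which clamps the wrapped estimate to `FEERATE_FLOOR_SATS_PER_KW`.
A simplified, free-standing sketch of that clamping wrapper (the trait and types below are
stand-ins for illustration, not the LDK definitions):

    const FEERATE_FLOOR_SATS_PER_KW: u32 = 253;

    trait FeeEstimator { fn get_est_sat_per_1000_weight(&self) -> u32; }

    struct StaticEstimator(u32);
    impl FeeEstimator for StaticEstimator { fn get_est_sat_per_1000_weight(&self) -> u32 { self.0 } }

    // Deliberately *not* a `FeeEstimator` impl, so clamped and unclamped estimates cannot be
    // mixed up at call sites.
    struct LowerBounded<F: FeeEstimator>(F);
    impl<F: FeeEstimator> LowerBounded<F> {
        fn bounded_sat_per_1000_weight(&self) -> u32 {
            std::cmp::max(self.0.get_est_sat_per_1000_weight(), FEERATE_FLOOR_SATS_PER_KW)
        }
    }

    fn main() {
        assert_eq!(LowerBounded(StaticEstimator(10)).bounded_sat_per_1000_weight(), 253);
        assert_eq!(LowerBounded(StaticEstimator(500)).bounded_sat_per_1000_weight(), 500);
    }
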
index e6b5733520a71f74a73526b227f1d704f9cf25f9..5c4ede0b16161819a8c7b54f4b8696cb5274912a 100644 (file)
@@ -43,6 +43,7 @@ use prelude::*;
 use sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
 use core::ops::Deref;
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use bitcoin::secp256k1::PublicKey;
 
 #[derive(Clone, Copy, Hash, PartialEq, Eq)]
 /// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
@@ -235,7 +236,7 @@ pub struct ChainMonitor<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: De
        persister: P,
        /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
        /// from the user and not from a [`ChannelMonitor`].
-       pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>)>>,
+       pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
        /// The best block height seen, used as a proxy for the passage of time.
        highest_chain_height: AtomicUsize,
 }
@@ -299,7 +300,7 @@ where C::Target: chain::Filter,
                                                        log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
                                                Err(ChannelMonitorUpdateErr::PermanentFailure) => {
                                                        monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                                                       self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)]));
+                                                       self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
                                                },
                                                Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
                                                        log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
@@ -458,7 +459,7 @@ where C::Target: chain::Filter,
                                self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
                                        funding_txo,
                                        monitor_update_id: monitor_data.monitor.get_latest_update_id(),
-                               }]));
+                               }], monitor_data.monitor.get_counterparty_node_id()));
                        },
                        MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
                                if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
@@ -476,10 +477,12 @@ where C::Target: chain::Filter,
        /// channel_monitor_updated once with the highest ID.
        #[cfg(any(test, fuzzing))]
        pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
+               let monitors = self.monitors.read().unwrap();
+               let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
                self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
                        funding_txo,
                        monitor_update_id,
-               }]));
+               }], counterparty_node_id));
        }
 
        #[cfg(any(test, fuzzing, feature = "_test_utils"))]
@@ -636,7 +639,7 @@ where C::Target: chain::Filter,
                        Some(monitor_state) => {
                                let monitor = &monitor_state.monitor;
                                log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
-                               let update_res = monitor.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger);
+                               let update_res = monitor.update_monitor(&update, &self.broadcaster, &*self.fee_estimator, &self.logger);
                                if update_res.is_err() {
                                        log_error!(self.logger, "Failed to update ChannelMonitor for channel {}.", log_funding_info!(monitor));
                                }
@@ -666,7 +669,7 @@ where C::Target: chain::Filter,
                }
        }
 
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
                let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
                for monitor_state in self.monitors.read().unwrap().values() {
                        let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
@@ -695,7 +698,8 @@ where C::Target: chain::Filter,
                                let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
                                if monitor_events.len() > 0 {
                                        let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
-                                       pending_monitor_events.push((monitor_outpoint, monitor_events));
+                                       let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
+                                       pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
                                }
                        }
                }
index 80cd9cb9d45958629e1789513190a4de63b87e7b..855263fe53b9bd09ce0f6bbbd64beabc34db64d5 100644 (file)
@@ -965,6 +965,13 @@ impl<Signer: Sign> Writeable for ChannelMonitorImpl<Signer> {
 }
 
 impl<Signer: Sign> ChannelMonitor<Signer> {
+       /// For lockorder enforcement purposes, we need to have a single site which constructs the
+	/// `inner` mutex; otherwise, in cases where we lock two monitors at the same time (eg in our
+	/// PartialEq implementation), we may decide a lockorder violation has occurred.
+       fn from_impl(imp: ChannelMonitorImpl<Signer>) -> Self {
+               ChannelMonitor { inner: Mutex::new(imp) }
+       }
+
        pub(crate) fn new(secp_ctx: Secp256k1<secp256k1::All>, keys: Signer, shutdown_script: Option<Script>,
                          on_counterparty_tx_csv: u16, destination_script: &Script, funding_info: (OutPoint, Script),
                          channel_parameters: &ChannelTransactionParameters,
@@ -1012,60 +1019,58 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                let mut outputs_to_watch = HashMap::new();
                outputs_to_watch.insert(funding_info.0.txid, vec![(funding_info.0.index as u32, funding_info.1.clone())]);
 
-               ChannelMonitor {
-                       inner: Mutex::new(ChannelMonitorImpl {
-                               latest_update_id: 0,
-                               commitment_transaction_number_obscure_factor,
+               Self::from_impl(ChannelMonitorImpl {
+                       latest_update_id: 0,
+                       commitment_transaction_number_obscure_factor,
 
-                               destination_script: destination_script.clone(),
-                               broadcasted_holder_revokable_script: None,
-                               counterparty_payment_script,
-                               shutdown_script,
+                       destination_script: destination_script.clone(),
+                       broadcasted_holder_revokable_script: None,
+                       counterparty_payment_script,
+                       shutdown_script,
 
-                               channel_keys_id,
-                               holder_revocation_basepoint,
-                               funding_info,
-                               current_counterparty_commitment_txid: None,
-                               prev_counterparty_commitment_txid: None,
+                       channel_keys_id,
+                       holder_revocation_basepoint,
+                       funding_info,
+                       current_counterparty_commitment_txid: None,
+                       prev_counterparty_commitment_txid: None,
 
-                               counterparty_commitment_params,
-                               funding_redeemscript,
-                               channel_value_satoshis,
-                               their_cur_per_commitment_points: None,
+                       counterparty_commitment_params,
+                       funding_redeemscript,
+                       channel_value_satoshis,
+                       their_cur_per_commitment_points: None,
 
-                               on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,
+                       on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,
 
-                               commitment_secrets: CounterpartyCommitmentSecrets::new(),
-                               counterparty_claimable_outpoints: HashMap::new(),
-                               counterparty_commitment_txn_on_chain: HashMap::new(),
-                               counterparty_hash_commitment_number: HashMap::new(),
+                       commitment_secrets: CounterpartyCommitmentSecrets::new(),
+                       counterparty_claimable_outpoints: HashMap::new(),
+                       counterparty_commitment_txn_on_chain: HashMap::new(),
+                       counterparty_hash_commitment_number: HashMap::new(),
 
-                               prev_holder_signed_commitment_tx: None,
-                               current_holder_commitment_tx: holder_commitment_tx,
-                               current_counterparty_commitment_number: 1 << 48,
-                               current_holder_commitment_number,
+                       prev_holder_signed_commitment_tx: None,
+                       current_holder_commitment_tx: holder_commitment_tx,
+                       current_counterparty_commitment_number: 1 << 48,
+                       current_holder_commitment_number,
 
-                               payment_preimages: HashMap::new(),
-                               pending_monitor_events: Vec::new(),
-                               pending_events: Vec::new(),
+                       payment_preimages: HashMap::new(),
+                       pending_monitor_events: Vec::new(),
+                       pending_events: Vec::new(),
 
-                               onchain_events_awaiting_threshold_conf: Vec::new(),
-                               outputs_to_watch,
+                       onchain_events_awaiting_threshold_conf: Vec::new(),
+                       outputs_to_watch,
 
-                               onchain_tx_handler,
+                       onchain_tx_handler,
 
-                               lockdown_from_offchain: false,
-                               holder_tx_signed: false,
-                               funding_spend_seen: false,
-                               funding_spend_confirmed: None,
-                               htlcs_resolved_on_chain: Vec::new(),
+                       lockdown_from_offchain: false,
+                       holder_tx_signed: false,
+                       funding_spend_seen: false,
+                       funding_spend_confirmed: None,
+                       htlcs_resolved_on_chain: Vec::new(),
 
-                               best_block,
-                               counterparty_node_id: Some(counterparty_node_id),
+                       best_block,
+                       counterparty_node_id: Some(counterparty_node_id),
 
-                               secp_ctx,
-                       }),
-               }
+                       secp_ctx,
+               })
        }
 
        #[cfg(test)]
@@ -1134,7 +1139,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                &self,
                updates: &ChannelMonitorUpdate,
                broadcaster: &B,
-               fee_estimator: &F,
+               fee_estimator: F,
                logger: &L,
        ) -> Result<(), ()>
        where
@@ -1209,6 +1214,14 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                self.inner.lock().unwrap().get_cur_holder_commitment_number()
        }
 
+       /// Gets the `node_id` of the counterparty for this channel.
+       ///
+       /// Will be `None` for channels constructed on LDK versions prior to 0.0.110 and always `Some`
+       /// otherwise.
+       pub fn get_counterparty_node_id(&self) -> Option<PublicKey> {
+               self.inner.lock().unwrap().counterparty_node_id
+       }
+
        /// Used by ChannelManager deserialization to broadcast the latest holder state if its copy of
        /// the Channel was out-of-date. You may use it to get a broadcastable holder toxic tx in case of
        /// fallen-behind, i.e when receiving a channel_reestablish with a proof that our counterparty side knows
@@ -1944,10 +1957,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
        }
 
-       pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
+       pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: F, logger: &L) -> Result<(), ()>
        where B::Target: BroadcasterInterface,
-                   F::Target: FeeEstimator,
-                   L::Target: Logger,
+               F::Target: FeeEstimator,
+               L::Target: Logger,
        {
                log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
                        log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
@@ -1985,7 +1998,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                },
                                ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
                                        log_trace!(logger, "Updating ChannelMonitor with payment preimage");
-                                       let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
+                                       let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator);
                                        self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage, broadcaster, &bounded_fee_estimator, logger)
                                },
                                ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
@@ -3361,60 +3374,58 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
 
-               Ok((best_block.block_hash(), ChannelMonitor {
-                       inner: Mutex::new(ChannelMonitorImpl {
-                               latest_update_id,
-                               commitment_transaction_number_obscure_factor,
+               Ok((best_block.block_hash(), ChannelMonitor::from_impl(ChannelMonitorImpl {
+                       latest_update_id,
+                       commitment_transaction_number_obscure_factor,
 
-                               destination_script,
-                               broadcasted_holder_revokable_script,
-                               counterparty_payment_script,
-                               shutdown_script,
+                       destination_script,
+                       broadcasted_holder_revokable_script,
+                       counterparty_payment_script,
+                       shutdown_script,
 
-                               channel_keys_id,
-                               holder_revocation_basepoint,
-                               funding_info,
-                               current_counterparty_commitment_txid,
-                               prev_counterparty_commitment_txid,
+                       channel_keys_id,
+                       holder_revocation_basepoint,
+                       funding_info,
+                       current_counterparty_commitment_txid,
+                       prev_counterparty_commitment_txid,
 
-                               counterparty_commitment_params,
-                               funding_redeemscript,
-                               channel_value_satoshis,
-                               their_cur_per_commitment_points,
+                       counterparty_commitment_params,
+                       funding_redeemscript,
+                       channel_value_satoshis,
+                       their_cur_per_commitment_points,
 
-                               on_holder_tx_csv,
+                       on_holder_tx_csv,
 
-                               commitment_secrets,
-                               counterparty_claimable_outpoints,
-                               counterparty_commitment_txn_on_chain,
-                               counterparty_hash_commitment_number,
+                       commitment_secrets,
+                       counterparty_claimable_outpoints,
+                       counterparty_commitment_txn_on_chain,
+                       counterparty_hash_commitment_number,
 
-                               prev_holder_signed_commitment_tx,
-                               current_holder_commitment_tx,
-                               current_counterparty_commitment_number,
-                               current_holder_commitment_number,
+                       prev_holder_signed_commitment_tx,
+                       current_holder_commitment_tx,
+                       current_counterparty_commitment_number,
+                       current_holder_commitment_number,
 
-                               payment_preimages,
-                               pending_monitor_events: pending_monitor_events.unwrap(),
-                               pending_events,
+                       payment_preimages,
+                       pending_monitor_events: pending_monitor_events.unwrap(),
+                       pending_events,
 
-                               onchain_events_awaiting_threshold_conf,
-                               outputs_to_watch,
+                       onchain_events_awaiting_threshold_conf,
+                       outputs_to_watch,
 
-                               onchain_tx_handler,
+                       onchain_tx_handler,
 
-                               lockdown_from_offchain,
-                               holder_tx_signed,
-                               funding_spend_seen: funding_spend_seen.unwrap(),
-                               funding_spend_confirmed,
-                               htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
+                       lockdown_from_offchain,
+                       holder_tx_signed,
+                       funding_spend_seen: funding_spend_seen.unwrap(),
+                       funding_spend_confirmed,
+                       htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
 
-                               best_block,
-                               counterparty_node_id,
+                       best_block,
+                       counterparty_node_id,
 
-                               secp_ctx,
-                       }),
-               }))
+                       secp_ctx,
+               })))
        }
 }
 
@@ -3534,7 +3545,7 @@ mod tests {
 
                let broadcaster = TestBroadcaster::new(Arc::clone(&nodes[1].blocks));
                assert!(
-                       pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
+                       pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
                        .is_err());
                // Even though we error'd on the first update, we should still have generated an HTLC claim
                // transaction
index 4231d08259abcbe1e4c50550f4b40aa7d7064faa..9a3baea8bb442a3c6928cca83559606a66de95ff 100644 (file)
@@ -27,6 +27,7 @@ use bitcoin::hash_types::WPubkeyHash;
 
 use bitcoin::secp256k1::{SecretKey, PublicKey};
 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Signing};
+use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 use bitcoin::{secp256k1, Witness};
 
@@ -404,6 +405,12 @@ pub trait KeysInterface {
        /// This method must return the same value each time it is called with a given `Recipient`
        /// parameter.
        fn get_node_secret(&self, recipient: Recipient) -> Result<SecretKey, ()>;
+       /// Gets the ECDH shared secret of our [`node secret`] and `other_key`, multiplying by `tweak` if
+       /// one is provided. Note that this tweak can be applied to `other_key` instead of our node
+       /// secret, though this is less efficient.
+       ///
+       /// [`node secret`]: Self::get_node_secret
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()>;
        /// Get a script pubkey which we send funds to when claiming on-chain contestable outputs.
        ///
        /// This method should return a different value each time it is called, to avoid linking
@@ -1133,6 +1140,14 @@ impl KeysInterface for KeysManager {
                }
        }
 
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+               let mut node_secret = self.get_node_secret(recipient)?;
+               if let Some(tweak) = tweak {
+                       node_secret.mul_assign(tweak).map_err(|_| ())?;
+               }
+               Ok(SharedSecret::new(other_key, &node_secret))
+       }
+
        fn get_inbound_payment_key_material(&self) -> KeyMaterial {
                self.inbound_payment_key.clone()
        }
@@ -1217,6 +1232,14 @@ impl KeysInterface for PhantomKeysManager {
                }
        }
 
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+               let mut node_secret = self.get_node_secret(recipient)?;
+               if let Some(tweak) = tweak {
+                       node_secret.mul_assign(tweak).map_err(|_| ())?;
+               }
+               Ok(SharedSecret::new(other_key, &node_secret))
+       }
+
        fn get_inbound_payment_key_material(&self) -> KeyMaterial {
                self.inbound_payment_key.clone()
        }
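
The new `ecdh` implementations apply the optional tweak to the node secret before running ECDH.
Because scalar multiplication commutes, `(tweak * secret) * other_key` equals
`secret * (tweak * other_key)`, so tweaking the 32-byte secret is the cheaper of the two
equivalent options, as the doc comment notes. A free-standing restatement of the hunk above,
assuming the same `secp256k1` API surface the diff compiles against (the version pulled in via
`bitcoin` 0.28):

    use bitcoin::secp256k1::ecdh::SharedSecret;
    use bitcoin::secp256k1::{PublicKey, SecretKey};

    // Illustrative helper only; in LDK this logic lives behind `KeysInterface::ecdh`.
    fn tweaked_ecdh(mut node_secret: SecretKey, other_key: &PublicKey, tweak: Option<&[u8; 32]>)
        -> Result<SharedSecret, ()>
    {
        if let Some(tweak) = tweak {
            // Multiply our secret scalar by the tweak rather than tweaking `other_key`.
            node_secret.mul_assign(tweak).map_err(|_| ())?;
        }
        Ok(SharedSecret::new(other_key, &node_secret))
    }
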
index 5959508de3b728e29b13d326b76c066d665c48bd..a0eb17c6be0051eef2d902cd09f4af4ee1ff89a7 100644 (file)
@@ -15,6 +15,7 @@ use bitcoin::blockdata::script::Script;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::hash_types::{BlockHash, Txid};
 use bitcoin::network::constants::Network;
+use bitcoin::secp256k1::PublicKey;
 
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
 use chain::keysinterface::Sign;
@@ -302,7 +303,7 @@ pub trait Watch<ChannelSigner: Sign> {
        ///
        /// For details on asynchronous [`ChannelMonitor`] updating and returning
        /// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`].
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)>;
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
 }
 
 /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
index b0cfa9bf000e712f797b3087d7d37b066b0510b7..30530303e59b366239e88c96d846f36da77a49ee 100644 (file)
@@ -778,13 +778,13 @@ fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predic
        where F::Target: FeeEstimator,
              L::Target: Logger,
 {
-       let mut updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64;
+       let mut updated_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64;
        let mut fee = updated_feerate * (predicted_weight as u64) / 1000;
        if input_amounts <= fee {
-               updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal) as u64;
+               updated_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal) as u64;
                fee = updated_feerate * (predicted_weight as u64) / 1000;
                if input_amounts <= fee {
-                       updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background) as u64;
+                       updated_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background) as u64;
                        fee = updated_feerate * (predicted_weight as u64) / 1000;
                        if input_amounts <= fee {
                                log_error!(logger, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
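
The updated `compute_fee_from_spent_amounts` walks down the confirmation targets (high priority,
then normal, then background) and keeps the first bounded feerate whose implied fee still leaves
part of the claimable value. A simplified restatement of that fallback ladder (the `Target` enum
and `pick_fee` helper are illustrative, not LDK APIs):

    #[derive(Clone, Copy)]
    enum Target { HighPriority, Normal, Background }

    // Returns (fee, feerate) for the most aggressive target whose fee fits, or None if even
    // the background fee would consume the entire claimable amount.
    fn pick_fee(estimate: impl Fn(Target) -> u64, input_amounts: u64, predicted_weight: u64)
        -> Option<(u64, u64)>
    {
        for target in [Target::HighPriority, Target::Normal, Target::Background] {
            let feerate = estimate(target);
            let fee = feerate * predicted_weight / 1000;
            if fee < input_amounts {
                return Some((fee, feerate));
            }
        }
        None
    }

    fn main() {
        let est = |t: Target| -> u64 {
            match t { Target::HighPriority => 5_000, Target::Normal => 2_000, Target::Background => 253 }
        };
        // The high-priority fee would exceed the 3_000 sat claim, so the normal-target fee is used.
        assert_eq!(pick_fee(est, 3_000, 1_000), Some((2_000, 2_000)));
    }
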
index 6b36682f43233f876dcacaf2e1ae34da0fef5e4e..b7466776a6e70f0f7720fdcadd3ade48a945089c 100644 (file)
@@ -2,11 +2,9 @@ pub use ::alloc::sync::Arc;
 use core::ops::{Deref, DerefMut};
 use core::time::Duration;
 
-use std::collections::HashSet;
 use std::cell::RefCell;
 
 use std::sync::atomic::{AtomicUsize, Ordering};
-
 use std::sync::Mutex as StdMutex;
 use std::sync::MutexGuard as StdMutexGuard;
 use std::sync::RwLock as StdRwLock;
@@ -14,8 +12,15 @@ use std::sync::RwLockReadGuard as StdRwLockReadGuard;
 use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
 use std::sync::Condvar as StdCondvar;
 
+use prelude::HashMap;
+
 #[cfg(feature = "backtrace")]
-use backtrace::Backtrace;
+use {prelude::hash_map, backtrace::Backtrace, std::sync::Once};
+
+#[cfg(not(feature = "backtrace"))]
+struct Backtrace{}
+#[cfg(not(feature = "backtrace"))]
+impl Backtrace { fn new() -> Backtrace { Backtrace {} } }
 
 pub type LockResult<Guard> = Result<Guard, ()>;
 
@@ -44,34 +49,77 @@ impl Condvar {
 
 thread_local! {
        /// We track the set of locks currently held by a reference to their `LockMetadata`
-       static LOCKS_HELD: RefCell<HashSet<Arc<LockMetadata>>> = RefCell::new(HashSet::new());
+       static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(HashMap::new());
 }
 static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
 
+#[cfg(feature = "backtrace")]
+static mut LOCKS: Option<StdMutex<HashMap<String, Arc<LockMetadata>>>> = None;
+#[cfg(feature = "backtrace")]
+static LOCKS_INIT: Once = Once::new();
+
 /// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
 /// when the Mutex itself was constructed.
 struct LockMetadata {
        lock_idx: u64,
-       locked_before: StdMutex<HashSet<Arc<LockMetadata>>>,
-       #[cfg(feature = "backtrace")]
-       lock_construction_bt: Backtrace,
+       locked_before: StdMutex<HashMap<u64, LockDep>>,
+       _lock_construction_bt: Backtrace,
 }
-impl PartialEq for LockMetadata {
-       fn eq(&self, o: &LockMetadata) -> bool { self.lock_idx == o.lock_idx }
+
+struct LockDep {
+       lock: Arc<LockMetadata>,
+       /// lockdep_trace is unused unless we're building with `backtrace`, so we mark it _
+       _lockdep_trace: Backtrace,
 }
-impl Eq for LockMetadata {}
-impl std::hash::Hash for LockMetadata {
-       fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock_idx); }
+
+#[cfg(feature = "backtrace")]
+fn get_construction_location(backtrace: &Backtrace) -> String {
+       // Find the first frame that is after `debug_sync` (or that is in our tests) and use
+       // that as the mutex construction site. Note that the first few frames may be in
+       // the `backtrace` crate, so we have to ignore those.
+       let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
+       let mut found_debug_sync = false;
+       for frame in backtrace.frames() {
+               for symbol in frame.symbols() {
+                       let symbol_name = symbol.name().unwrap().as_str().unwrap();
+                       if !sync_mutex_constr_regex.is_match(symbol_name) {
+                               if found_debug_sync {
+                                       if let Some(col) = symbol.colno() {
+                                               return format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col);
+                                       } else {
+                                               // Windows debug symbols don't support column numbers, so fall back to
+                                               // line numbers only if no `colno` is available
+                                               return format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap());
+                                       }
+                               }
+                       } else { found_debug_sync = true; }
+               }
+       }
+       panic!("Couldn't find mutex construction callsite");
 }
 
 impl LockMetadata {
-       fn new() -> LockMetadata {
-               LockMetadata {
-                       locked_before: StdMutex::new(HashSet::new()),
-                       lock_idx: LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64,
-                       #[cfg(feature = "backtrace")]
-                       lock_construction_bt: Backtrace::new(),
+       fn new() -> Arc<LockMetadata> {
+               let backtrace = Backtrace::new();
+               let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;
+
+               let res = Arc::new(LockMetadata {
+                       locked_before: StdMutex::new(HashMap::new()),
+                       lock_idx,
+                       _lock_construction_bt: backtrace,
+               });
+
+               #[cfg(feature = "backtrace")]
+               {
+                       let lock_constr_location = get_construction_location(&res._lock_construction_bt);
+                       LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
+                       let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
+                       match locks.entry(lock_constr_location) {
+                               hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
+                               hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
+                       }
                }
+               res
        }
 
        // Returns whether we were a recursive lock (only relevant for read)
@@ -81,28 +129,37 @@ impl LockMetadata {
                        // For each lock which is currently locked, check that no lock's locked-before
                        // set includes the lock we're about to lock, which would imply a lockorder
                        // inversion.
-                       for locked in held.borrow().iter() {
-                               if read && *locked == *this {
+                       for (locked_idx, _locked) in held.borrow().iter() {
+                               if read && *locked_idx == this.lock_idx {
                                        // Recursive read locks are explicitly allowed
                                        return;
                                }
                        }
-                       for locked in held.borrow().iter() {
-                               if !read && *locked == *this {
-                                       panic!("Tried to lock a lock while it was held!");
+                       for (locked_idx, locked) in held.borrow().iter() {
+                               if !read && *locked_idx == this.lock_idx {
+                                       // With `feature = "backtrace"` set, we may be looking at different instances
+                                       // of the same lock.
+                                       debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
                                }
-                               for locked_dep in locked.locked_before.lock().unwrap().iter() {
-                                       if *locked_dep == *this {
+                               for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
+                                       if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
                                                #[cfg(feature = "backtrace")]
-                                               panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.lock_construction_bt);
+                                               panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
+                                                       get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
+                                                       get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
+                                                       _locked_dep._lockdep_trace);
                                                #[cfg(not(feature = "backtrace"))]
                                                panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
                                        }
                                }
                                // Insert any already-held locks in our locked-before set.
-                               this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+                               let mut locked_before = this.locked_before.lock().unwrap();
+                               if !locked_before.contains_key(&locked.lock_idx) {
+                                       let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
+                                       locked_before.insert(lockdep.lock.lock_idx, lockdep);
+                               }
                        }
-                       held.borrow_mut().insert(Arc::clone(this));
+                       held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
                        inserted = true;
                });
                inserted
@@ -116,10 +173,14 @@ impl LockMetadata {
                        // Since a try-lock will simply fail if the lock is held already, we do not
                        // consider try-locks to ever generate lockorder inversions. However, if a try-lock
                        // succeeds, we do consider it to have created lockorder dependencies.
-                       for locked in held.borrow().iter() {
-                               this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+                       let mut locked_before = this.locked_before.lock().unwrap();
+                       for (locked_idx, locked) in held.borrow().iter() {
+                               if !locked_before.contains_key(locked_idx) {
+                                       let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
+                                       locked_before.insert(*locked_idx, lockdep);
+                               }
                        }
-                       held.borrow_mut().insert(Arc::clone(this));
+                       held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
                });
        }
 }
@@ -149,7 +210,7 @@ impl<'a, T: Sized> MutexGuard<'a, T> {
 impl<T: Sized> Drop for MutexGuard<'_, T> {
        fn drop(&mut self) {
                LOCKS_HELD.with(|held| {
-                       held.borrow_mut().remove(&self.mutex.deps);
+                       held.borrow_mut().remove(&self.mutex.deps.lock_idx);
                });
        }
 }
@@ -170,7 +231,7 @@ impl<T: Sized> DerefMut for MutexGuard<'_, T> {
 
 impl<T> Mutex<T> {
        pub fn new(inner: T) -> Mutex<T> {
-               Mutex { inner: StdMutex::new(inner), deps: Arc::new(LockMetadata::new()) }
+               Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
        }
 
        pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
@@ -220,7 +281,7 @@ impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
                        return;
                }
                LOCKS_HELD.with(|held| {
-                       held.borrow_mut().remove(&self.lock.deps);
+                       held.borrow_mut().remove(&self.lock.deps.lock_idx);
                });
        }
 }
@@ -236,7 +297,7 @@ impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
 impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
        fn drop(&mut self) {
                LOCKS_HELD.with(|held| {
-                       held.borrow_mut().remove(&self.lock.deps);
+                       held.borrow_mut().remove(&self.lock.deps.lock_idx);
                });
        }
 }
@@ -249,7 +310,7 @@ impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
 
 impl<T> RwLock<T> {
        pub fn new(inner: T) -> RwLock<T> {
-               RwLock { inner: StdRwLock::new(inner), deps: Arc::new(LockMetadata::new()) }
+               RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
        }
 
        pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
@@ -271,96 +332,101 @@ impl<T> RwLock<T> {
        }
 }
 
-#[test]
-#[should_panic]
-fn recursive_lock_fail() {
-       let mutex = Mutex::new(());
-       let _a = mutex.lock().unwrap();
-       let _b = mutex.lock().unwrap();
-}
-
-#[test]
-fn recursive_read() {
-       let lock = RwLock::new(());
-       let _a = lock.read().unwrap();
-       let _b = lock.read().unwrap();
-}
+pub type FairRwLock<T> = RwLock<T>;
 
-#[test]
-#[should_panic]
-fn lockorder_fail() {
-       let a = Mutex::new(());
-       let b = Mutex::new(());
-       {
-               let _a = a.lock().unwrap();
-               let _b = b.lock().unwrap();
-       }
-       {
-               let _b = b.lock().unwrap();
-               let _a = a.lock().unwrap();
+mod tests {
+       use super::{RwLock, Mutex};
+
+       #[test]
+       #[should_panic]
+       #[cfg(not(feature = "backtrace"))]
+       fn recursive_lock_fail() {
+               let mutex = Mutex::new(());
+               let _a = mutex.lock().unwrap();
+               let _b = mutex.lock().unwrap();
+       }
+
+       #[test]
+       fn recursive_read() {
+               let lock = RwLock::new(());
+               let _a = lock.read().unwrap();
+               let _b = lock.read().unwrap();
+       }
+
+       #[test]
+       #[should_panic]
+       fn lockorder_fail() {
+               let a = Mutex::new(());
+               let b = Mutex::new(());
+               {
+                       let _a = a.lock().unwrap();
+                       let _b = b.lock().unwrap();
+               }
+               {
+                       let _b = b.lock().unwrap();
+                       let _a = a.lock().unwrap();
+               }
        }
-}
 
-#[test]
-#[should_panic]
-fn write_lockorder_fail() {
-       let a = RwLock::new(());
-       let b = RwLock::new(());
-       {
-               let _a = a.write().unwrap();
-               let _b = b.write().unwrap();
-       }
-       {
-               let _b = b.write().unwrap();
-               let _a = a.write().unwrap();
+       #[test]
+       #[should_panic]
+       fn write_lockorder_fail() {
+               let a = RwLock::new(());
+               let b = RwLock::new(());
+               {
+                       let _a = a.write().unwrap();
+                       let _b = b.write().unwrap();
+               }
+               {
+                       let _b = b.write().unwrap();
+                       let _a = a.write().unwrap();
+               }
        }
-}
 
-#[test]
-#[should_panic]
-fn read_lockorder_fail() {
-       let a = RwLock::new(());
-       let b = RwLock::new(());
-       {
-               let _a = a.read().unwrap();
-               let _b = b.read().unwrap();
-       }
-       {
-               let _b = b.read().unwrap();
-               let _a = a.read().unwrap();
+       #[test]
+       #[should_panic]
+       fn read_lockorder_fail() {
+               let a = RwLock::new(());
+               let b = RwLock::new(());
+               {
+                       let _a = a.read().unwrap();
+                       let _b = b.read().unwrap();
+               }
+               {
+                       let _b = b.read().unwrap();
+                       let _a = a.read().unwrap();
+               }
        }
-}
 
-#[test]
-fn read_recurisve_no_lockorder() {
-       // Like the above, but note that no lockorder is implied when we recursively read-lock a
-       // RwLock, causing this to pass just fine.
-       let a = RwLock::new(());
-       let b = RwLock::new(());
-       let _outer = a.read().unwrap();
-       {
-               let _a = a.read().unwrap();
-               let _b = b.read().unwrap();
-       }
-       {
-               let _b = b.read().unwrap();
-               let _a = a.read().unwrap();
+       #[test]
+       fn read_recursive_no_lockorder() {
+               // Like the above, but note that no lockorder is implied when we recursively read-lock a
+               // RwLock, causing this to pass just fine.
+               let a = RwLock::new(());
+               let b = RwLock::new(());
+               let _outer = a.read().unwrap();
+               {
+                       let _a = a.read().unwrap();
+                       let _b = b.read().unwrap();
+               }
+               {
+                       let _b = b.read().unwrap();
+                       let _a = a.read().unwrap();
+               }
        }
-}
 
-#[test]
-#[should_panic]
-fn read_write_lockorder_fail() {
-       let a = RwLock::new(());
-       let b = RwLock::new(());
-       {
-               let _a = a.write().unwrap();
-               let _b = b.read().unwrap();
-       }
-       {
-               let _b = b.read().unwrap();
-               let _a = a.write().unwrap();
+       #[test]
+       #[should_panic]
+       fn read_write_lockorder_fail() {
+               let a = RwLock::new(());
+               let b = RwLock::new(());
+               {
+                       let _a = a.write().unwrap();
+                       let _b = b.read().unwrap();
+               }
+               {
+                       let _b = b.read().unwrap();
+                       let _a = a.write().unwrap();
+               }
        }
 }
-
-pub type FairRwLock<T> = RwLock<T>;
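
The reworked `debug_sync` records, for every lock, which other locks were already held when it was
taken, and panics when an acquisition would invert one of those recorded orders; with the
`backtrace` feature, locks are additionally deduplicated by their construction site. A deliberately
tiny sketch of that idea with illustrative names, omitting thread poisoning, read-lock recursion and
the backtrace handling:

    use std::cell::RefCell;
    use std::collections::{HashMap, HashSet};

    thread_local! {
        // Lock ids currently held by this thread.
        static HELD: RefCell<HashSet<u64>> = RefCell::new(HashSet::new());
    }

    #[derive(Default)]
    struct LockOrderTracker {
        // For each lock id, the ids of locks observed held before it.
        locked_before: HashMap<u64, HashSet<u64>>,
    }

    impl LockOrderTracker {
        fn pre_lock(&mut self, this: u64) {
            HELD.with(|held| {
                for &already_held in held.borrow().iter() {
                    // If `already_held` was ever taken while `this` was held, taking `this`
                    // now (while `already_held` is held) is a lockorder inversion.
                    if self.locked_before.get(&already_held).map_or(false, |s| s.contains(&this)) {
                        panic!("lockorder inversion between locks {} and {}", this, already_held);
                    }
                    self.locked_before.entry(this).or_default().insert(already_held);
                }
                held.borrow_mut().insert(this);
            });
        }

        fn unlock(&mut self, this: u64) {
            HELD.with(|held| { held.borrow_mut().remove(&this); });
        }
    }

    fn main() {
        let mut tracker = LockOrderTracker::default();
        // Taking A then B records the dependency "A was held before B".
        tracker.pre_lock(1); tracker.pre_lock(2);
        tracker.unlock(2); tracker.unlock(1);
        // Taking B then A would now panic; left commented out so the sketch runs cleanly.
        // tracker.pre_lock(2); tracker.pre_lock(1);
    }
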
index ba6d6bc71910e53ad37236dc30b6e19cd947a4d9..2e6b3ab3c0a99cb2d438b15982ed954d4bd35e94 100644 (file)
@@ -17,7 +17,7 @@
 //! figure out how best to make networking happen/timers fire/things get written to disk/keys get
 //! generated/etc. This makes it a good candidate for tight integration into an existing wallet
 //! instead of having a rather-separate lightning appendage to a wallet.
-//! 
+//!
 //! `default` features are:
 //!
 //! * `std` - enables functionalities which require `std`, including `std::io` trait implementations and things which utilize time
@@ -76,6 +76,8 @@ pub mod util;
 pub mod chain;
 pub mod ln;
 pub mod routing;
+#[allow(unused)]
+mod onion_message; // To be exposed after sending/receiving OMs is supported in PeerManager.
 
 #[cfg(feature = "std")]
 /// Re-export of either `core2::io` or `std::io`, depending on the `std` feature flag.
index 0d2b3b6a9f941c117c80465b7ff886f329a349ad..02ac7b48137fd6cad28c9e37d6b8a16421ea999f 100644 (file)
@@ -26,7 +26,7 @@ use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use util::config::UserConfig;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use util::errors::APIError;
 use util::ser::{ReadableArgs, Writeable};
 use util::test_utils::TestBroadcaster;
@@ -832,7 +832,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
        nodes[2].node.fail_htlc_backwards(&payment_hash_1);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
        check_added_monitors!(nodes[2], 1);
 
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -913,7 +913,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
 
        let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
@@ -1690,14 +1690,14 @@ fn test_monitor_update_on_pending_forwards() {
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+       let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
 
        // Rebalance a bit so that we can send backwards from 3 to 1.
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
 
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
        nodes[2].node.fail_htlc_backwards(&payment_hash_1);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
        check_added_monitors!(nodes[2], 1);
 
        let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -1718,7 +1718,7 @@ fn test_monitor_update_on_pending_forwards() {
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
 
        chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -2106,7 +2106,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
        check_closed_broadcast!(nodes[1], true);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
 
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
        expect_payment_sent_without_paths!(nodes[0], payment_preimage);
@@ -2469,7 +2469,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
        };
        if second_fails {
                nodes[2].node.fail_htlc_backwards(&payment_hash);
-               expect_pending_htlcs_forwardable!(nodes[2]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
                check_added_monitors!(nodes[2], 1);
                get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        } else {
@@ -2505,7 +2505,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 
        if second_fails {
                reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
        } else {
                reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
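Across these tests, `expect_pending_htlcs_forwardable!` becomes `expect_pending_htlcs_forwardable_and_htlc_handling_failed!`, which additionally asserts which destination the failed HTLC was headed for. For orientation, a sketch of the `HTLCDestination` shapes as they are constructed in this diff — variant and field names are copied from the call sites, while the real enum lives in `lightning::util::events` and may define further variants and derives:

use bitcoin::secp256k1::PublicKey;
use lightning::ln::PaymentHash;

// Illustrative only; see `util::events` for the real definition.
pub enum HTLCDestination {
    /// We could not forward the HTLC onward over the given channel/peer.
    NextHopChannel { node_id: Option<PublicKey>, channel_id: [u8; 32] },
    /// The requested outbound short channel id is unknown to us.
    UnknownNextHop { requested_forward_scid: u64 },
    /// We were the final hop and failed the inbound payment back.
    FailedPayment { payment_hash: PaymentHash },
}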
index da770c7a1e48174d09e7504d51f152d6a310a6f0..fdd073f632a8d4b978453a55dc6c98b7f277e141 100644 (file)
@@ -931,7 +931,7 @@ impl<Signer: Sign> Channel<Signer> {
                        return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
                }
 
-               let feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+               let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
                let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
                let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors);
@@ -1078,11 +1078,11 @@ impl<Signer: Sign> Channel<Signer> {
                // We generally don't care too much if they set the feerate to something very high, but it
                // could result in the channel being useless due to everything being dust.
                let upper_limit = cmp::max(250 * 25,
-                       fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
+                       fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
                if feerate_per_kw as u64 > upper_limit {
                        return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
                }
-               let lower_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
+               let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
                // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
                // occasional issues with feerate disagreements between an initiator that wants a feerate
                // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
@@ -4038,8 +4038,8 @@ impl<Signer: Sign> Channel<Signer> {
                // Propose a range from our current Background feerate to our Normal feerate plus our
                // force_close_avoidance_max_fee_satoshis.
                // If we fail to come to consensus, we'll have to force-close.
-               let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
-               let normal_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+               let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
+               let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
                let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() };
 
                // The spec requires that (when the channel does not have anchors) we only send absolute
@@ -5781,7 +5781,7 @@ impl<Signer: Sign> Channel<Signer> {
        /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
        /// Also returns the list of payment_hashes for channels which we can safely fail backwards
        /// immediately (others we will have to allow to time out).
-       pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) {
+       pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
                // Note that we MUST only generate a monitor update that indicates force-closure - we're
                // called during initialization prior to the chain_monitor in the encompassing ChannelManager
                // being fully configured in some cases. Thus, it's likely any monitor events we generate will
@@ -5791,10 +5791,11 @@ impl<Signer: Sign> Channel<Signer> {
                // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
                // return them to fail the payment.
                let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+               let counterparty_node_id = self.get_counterparty_node_id();
                for htlc_update in self.holding_cell_htlc_updates.drain(..) {
                        match htlc_update {
                                HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
-                                       dropped_outbound_htlcs.push((source, payment_hash));
+                                       dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
                                },
                                _ => {}
                        }
@@ -6589,7 +6590,7 @@ mod tests {
        use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
        use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
        use ln::features::{InitFeatures, ChannelTypeFeatures};
-       use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate};
+       use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
        use ln::script::ShutdownScript;
        use ln::chan_utils;
        use ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
@@ -6605,6 +6606,7 @@ mod tests {
        use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
        use bitcoin::secp256k1::ffi::Signature as FFISignature;
        use bitcoin::secp256k1::{SecretKey,PublicKey};
+       use bitcoin::secp256k1::ecdh::SharedSecret;
        use bitcoin::secp256k1::ecdsa::RecoverableSignature;
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::hashes::Hash;
@@ -6645,6 +6647,7 @@ mod tests {
                type Signer = InMemorySigner;
 
                fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> { panic!(); }
+               fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> { panic!(); }
                fn get_inbound_payment_key_material(&self) -> KeyMaterial { panic!(); }
                fn get_destination_script(&self) -> Script {
                        let secp_ctx = Secp256k1::signing_only();
@@ -7063,7 +7066,7 @@ mod tests {
                                flags: 0,
                                cltv_expiry_delta: 100,
                                htlc_minimum_msat: 5,
-                               htlc_maximum_msat: OptionalField::Absent,
+                               htlc_maximum_msat: MAX_VALUE_MSAT,
                                fee_base_msat: 110,
                                fee_proportional_millionths: 11,
                                excess_data: Vec::new(),
index 568df11dfa975742071bbf81e917bea05e0dd43e..5d84ff62877471034d2f5b59105e2f9c4236b994 100644 (file)
@@ -48,11 +48,11 @@ use routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParame
 use ln::msgs;
 use ln::msgs::NetAddress;
 use ln::onion_utils;
-use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
+use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
 use ln::wire::Encode;
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
 use util::config::{UserConfig, ChannelConfig};
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use util::{byte_utils, events};
 use util::scid_utils::fake_scid;
 use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
@@ -281,7 +281,7 @@ enum ClaimFundsFromHop {
        DuplicateClaim,
 }
 
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@ -1890,7 +1890,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
 
                for htlc_source in failed_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
 
                let _ = handle_error!(self, result, *counterparty_node_id);
@@ -1946,7 +1947,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
                log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
                if let Some((funding_txo, monitor_update)) = monitor_update_option {
                        // There isn't anything we can do if we get an update failure - we're already
@@ -2189,7 +2192,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                }
 
-               let next_hop = match onion_utils::decode_next_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
+               let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
                        Ok(res) => res,
                        Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                return_malformed_err!(err_msg, err_code);
@@ -2397,7 +2400,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
                        cltv_expiry_delta: chan.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
-                       htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
+                       htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
                        fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
                        fee_proportional_millionths: chan.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
@@ -3107,21 +3110,42 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
                                                                                routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
                                                                                prev_funding_outpoint } => {
+                                                                                       macro_rules! failure_handler {
+                                                                                               ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
+                                                                                                       log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+
+                                                                                                       let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                                                                                                               short_channel_id: prev_short_channel_id,
+                                                                                                               outpoint: prev_funding_outpoint,
+                                                                                                               htlc_id: prev_htlc_id,
+                                                                                                               incoming_packet_shared_secret: incoming_shared_secret,
+                                                                                                               phantom_shared_secret: $phantom_ss,
+                                                                                                       });
+
+                                                                                                       let reason = if $next_hop_unknown {
+                                                                                                               HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
+                                                                                                       } else {
+                                                                                                               HTLCDestination::FailedPayment{ payment_hash }
+                                                                                                       };
+
+                                                                                                       failed_forwards.push((htlc_source, payment_hash,
+                                                                                                               HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
+                                                                                                               reason
+                                                                                                       ));
+                                                                                                       continue;
+                                                                                               }
+                                                                                       }
                                                                                        macro_rules! fail_forward {
                                                                                                ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
                                                                                                        {
-                                                                                                               log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                                                                                               let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-                                                                                                                       short_channel_id: prev_short_channel_id,
-                                                                                                                       outpoint: prev_funding_outpoint,
-                                                                                                                       htlc_id: prev_htlc_id,
-                                                                                                                       incoming_packet_shared_secret: incoming_shared_secret,
-                                                                                                                       phantom_shared_secret: $phantom_ss,
-                                                                                                               });
-                                                                                                               failed_forwards.push((htlc_source, payment_hash,
-                                                                                                                       HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }
-                                                                                                               ));
-                                                                                                               continue;
+                                                                                                               failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
+                                                                                                       }
+                                                                                               }
+                                                                                       }
+                                                                                       macro_rules! failed_payment {
+                                                                                               ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+                                                                                                       {
+                                                                                                               failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
                                                                                                        }
                                                                                                }
                                                                                        }
@@ -3129,7 +3153,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
                                                                                                if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
                                                                                                        let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
-                                                                                                       let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+                                                                                                       let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
                                                                                                                Ok(res) => res,
                                                                                                                Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                                                                                                        let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
@@ -3137,17 +3161,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                                        // `update_fail_malformed_htlc`, meaning here we encrypt the error as
                                                                                                                        // if it came from us (the second-to-last hop) but contains the sha256
                                                                                                                        // of the onion.
-                                                                                                                       fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None);
+                                                                                                                       failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
                                                                                                                },
                                                                                                                Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
-                                                                                                                       fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
+                                                                                                                       failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
                                                                                                                },
                                                                                                        };
                                                                                                        match next_hop {
                                                                                                                onion_utils::Hop::Receive(hop_data) => {
                                                                                                                        match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
                                                                                                                                Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
-                                                                                                                               Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret))
+                                                                                                                               Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                        }
                                                                                                                },
                                                                                                                _ => panic!(),
@@ -3198,7 +3222,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        }
                                                                                        let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
                                                                                        failed_forwards.push((htlc_source, payment_hash,
-                                                                                               HTLCFailReason::Reason { failure_code, data }
+                                                                                               HTLCFailReason::Reason { failure_code, data },
+                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
                                                                                        ));
                                                                                        continue;
                                                                                },
@@ -3327,7 +3352,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                };
 
                                                                macro_rules! fail_htlc {
-                                                                       ($htlc: expr) => {
+                                                                       ($htlc: expr, $payment_hash: expr) => {
                                                                                let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
                                                                                htlc_msat_height_data.extend_from_slice(
                                                                                        &byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
@@ -3339,7 +3364,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
                                                                                                phantom_shared_secret,
                                                                                        }), payment_hash,
-                                                                                       HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
+                                                                                       HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+                                                                                       HTLCDestination::FailedPayment { payment_hash: $payment_hash },
                                                                                ));
                                                                        }
                                                                }
@@ -3358,7 +3384,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
                                                                                                log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                               fail_htlc!(claimable_htlc);
+                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                continue
                                                                                        }
                                                                                }
@@ -3380,7 +3406,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
                                                                                        log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
                                                                                                log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else if total_value == $payment_data.total_msat {
                                                                                        htlcs.push(claimable_htlc);
                                                                                        new_events.push(events::Event::PaymentReceived {
@@ -3414,7 +3440,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
                                                                                                        Ok(payment_preimage) => payment_preimage,
                                                                                                        Err(()) => {
-                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                                continue
                                                                                                        }
                                                                                                };
@@ -3433,7 +3459,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                        },
                                                                                                        hash_map::Entry::Occupied(_) => {
                                                                                                                log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
-                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                        }
                                                                                                }
                                                                                        }
@@ -3442,17 +3468,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
                                                                                if payment_data.is_none() {
                                                                                        log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                        continue
                                                                                };
                                                                                let payment_data = payment_data.unwrap();
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
                                                                                        let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
                                                                                        if payment_received_generated {
@@ -3471,8 +3497,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                }
 
-               for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
+               for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination);
                }
                self.forward_htlcs(&mut phantom_receives);
 
@@ -3577,7 +3603,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
                        let mut should_persist = NotifyOption::SkipPersist;
 
-                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
                        let mut handle_errors = Vec::new();
                        {
@@ -3616,7 +3642,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let mut should_persist = NotifyOption::SkipPersist;
                        if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
 
-                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
                        let mut handle_errors = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
@@ -3695,7 +3721,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
 
                        for htlc_source in timed_out_mpp_htlcs.drain(..) {
-                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
+                               let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
+                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver);
                        }
 
                        for (err, counterparty_node_id) in handle_errors.drain(..) {
@@ -3731,7 +3758,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                self.best_block.read().unwrap().height()));
                                self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
                                                HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
-                                               HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
+                                               HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+                                               HTLCDestination::FailedPayment { payment_hash: *payment_hash });
                        }
                }
        }
@@ -3766,7 +3794,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
                        let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
                        if desired_err_code == 0x1000 | 20 {
-                               // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+                               // No flags for `disabled_flags` are currently defined so they're always two zero bytes.
+                               // See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008
                                0u16.write(&mut enc).expect("Writes cannot fail");
                        }
                        (upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
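The rewritten comment above explains the two zero bytes written for error 0x1000|20 (`channel_disabled`): no `disabled_flags` bits are defined, so the field is always zero, and it is followed by a 2-byte length and the serialized `channel_update`. A hedged sketch of the resulting byte layout — `update_type` and `update_bytes` stand in for values the surrounding (not fully shown) code derives from the `channel_update`, and the `+ 2` mirrors the length written in the hunk:

// Failure data layout for `channel_disabled` as assembled above (sketch; the
// real code writes through LDK's `Writeable` machinery rather than raw bytes).
fn channel_disabled_failure_data(update_type: u16, update_bytes: &[u8]) -> Vec<u8> {
    let mut data = Vec::with_capacity(update_bytes.len() + 6);
    data.extend_from_slice(&0u16.to_be_bytes());                            // disabled_flags: no bits defined
    data.extend_from_slice(&(update_bytes.len() as u16 + 2).to_be_bytes()); // length, matching the `+ 2` above
    data.extend_from_slice(&update_type.to_be_bytes());                     // assumed 2-byte message type prefix
    data.extend_from_slice(update_bytes);                                   // the serialized channel_update
    data
}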
@@ -3787,7 +3816,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        // be surfaced to the user.
        fn fail_holding_cell_htlcs(
                &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
-               _counterparty_node_id: &PublicKey
+               counterparty_node_id: &PublicKey
        ) {
                for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
                        match htlc_src {
@@ -3800,8 +3829,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
                                                };
                                        let channel_state = self.channel_state.lock().unwrap();
-                                       self.fail_htlc_backwards_internal(channel_state,
-                                               htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
+
+                                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
+                                       self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver)
                                },
                                HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
                                        let mut session_priv_bytes = [0; 32];
@@ -3854,7 +3884,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// to fail and take the channel_state lock for each iteration (as we take ownership and may
        /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
        /// still-available channels.
-       fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+       fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
                //TODO: There is a timing attack here where if a node fails an HTLC back to us they can
                //identify whether we sent it or not based on the (I presume) very different runtime
                //between the branches here. We should make this async and move it into the forward HTLCs
@@ -3984,7 +4014,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                pending_events.push(path_failure);
                                if let Some(ev) = full_failure_ev { pending_events.push(ev); }
                        },
-                       HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. }) => {
+                       HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
                                let err_packet = match onion_error {
                                        HTLCFailReason::Reason { failure_code, data } => {
                                                log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
@@ -4016,12 +4046,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        }
                                }
                                mem::drop(channel_state_lock);
+                               let mut pending_events = self.pending_events.lock().unwrap();
                                if let Some(time) = forward_event {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
                                        pending_events.push(events::Event::PendingHTLCsForwardable {
                                                time_forwardable: time
                                        });
                                }
+                               pending_events.push(events::Event::HTLCHandlingFailed {
+                                       prev_channel_id: outpoint.to_channel_id(),
+                                       failed_next_destination: destination
+                               });
                        },
                }
        }
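This is the one place the new `Event::HTLCHandlingFailed` is generated: every caller touched in this diff now threads an `HTLCDestination` into `fail_htlc_backwards_internal`, and the previous-hop branch surfaces it together with the channel the HTLC arrived on. A sketch of what consuming the event might look like on the application side — the event name and its fields come from the `push` above; the handler itself is only an illustration:

use lightning::util::events::{Event, HTLCDestination};

// Minimal example consumer; a real application would receive the event via
// its usual `EventsProvider`/event-handling plumbing.
fn log_htlc_handling_failed(event: &Event) {
    if let Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination } = event {
        match failed_next_destination {
            HTLCDestination::NextHopChannel { node_id, channel_id } =>
                println!("forward from {:02x?} over {:02x?} (peer {:?}) failed", prev_channel_id, channel_id, node_id),
            HTLCDestination::UnknownNextHop { requested_forward_scid } =>
                println!("no usable channel for outbound scid {}", requested_forward_scid),
            HTLCDestination::FailedPayment { payment_hash } =>
                println!("failed back inbound payment {:02x?}", payment_hash.0),
            _ => {},
        }
    }
}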
@@ -4113,7 +4147,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        self.best_block.read().unwrap().height()));
                                        self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
                                                                         HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
-                                                                        HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
+                                                                        HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
+                                                                        HTLCDestination::FailedPayment { payment_hash });
                                } else {
                                        match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
                                                ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
@@ -4357,7 +4392,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let chan_restoration_res;
-               let (mut pending_failures, finalized_claims) = {
+               let (mut pending_failures, finalized_claims, counterparty_node_id) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
@@ -4368,6 +4403,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                return;
                        }
 
+                       let counterparty_node_id = channel.get().get_counterparty_node_id();
                        let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
                        let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
                                // We only send a channel_update in the case where we are just now sending a
@@ -4386,12 +4422,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        if let Some(upd) = channel_update {
                                channel_state.pending_msg_events.push(upd);
                        }
-                       (updates.failed_htlcs, updates.finalized_claimed_htlcs)
+
+                       (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                self.finalize_claims(finalized_claims);
                for failure in pending_failures.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
                }
        }
 
@@ -4750,7 +4788,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
 
                let _ = handle_error!(self, result, *counterparty_node_id);
@@ -5036,7 +5075,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                short_channel_id, channel_outpoint)) =>
                        {
                                for failure in pending_failures.drain(..) {
-                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
+                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
                                }
                                self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
                                self.finalize_claims(finalized_claim_htlcs);
@@ -5183,7 +5223,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
+               for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
@@ -5192,7 +5232,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
                                                } else {
                                                        log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                                                       let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                                                }
                                        },
                                        MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
@@ -5834,7 +5875,7 @@ where
                                                let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
                                                timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
                                                        failure_code, data,
-                                               }));
+                                               }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
                                        }
                                        if let Some(channel_ready) = channel_ready_opt {
                                                send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready);
@@ -5915,10 +5956,11 @@ where
                                                if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
                                                        let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                                        htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
                                                        timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
                                                                failure_code: 0x4000 | 15,
                                                                data: htlc_msat_height_data
-                                                       }));
+                                                       }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
                                                        false
                                                } else { true }
                                        });
@@ -5929,8 +5971,8 @@ where
 
                self.handle_init_event_channel_failures(failed_channels);
 
-               for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+               for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination);
                }
        }
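
The timed-out-HTLC entries drained above now carry a destination alongside the failure reason. A hedged sketch of the element type implied by the four-field destructure (exact types are assumptions):

    // Assumed element type of `timed_out_htlcs`, inferred from the destructure above; the
    // destination is presumably what later surfaces to users as Event::HTLCHandlingFailed.
    type TimedOutHtlc = (HTLCSource, PaymentHash, HTLCFailReason, HTLCDestination);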
 
@@ -7286,7 +7328,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                };
 
                for htlc_source in failed_htlcs.drain(..) {
-                       channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+                       channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
 
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -7311,7 +7355,7 @@ mod tests {
        use ln::msgs::ChannelMessageHandler;
        use routing::router::{PaymentParameters, RouteParameters, find_route};
        use util::errors::APIError;
-       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+       use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use util::test_utils;
        use chain::keysinterface::KeysInterface;
 
@@ -7474,7 +7518,7 @@ mod tests {
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -7594,8 +7638,10 @@ mod tests {
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               // We have to forward pending HTLCs twice - once tries to forward the payment forward (and
+               // fails), the second will process the resulting failure and fail the HTLC backward
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -7636,7 +7682,7 @@ mod tests {
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
index a49e711b808d3902cc93e2b81606b9de414b6d23..54d199a26f83f85cec4567baa1b1fae45d9e6faa 100644 (file)
@@ -24,7 +24,7 @@ use util::enforcing_trait_impls::EnforcingSigner;
 use util::scid_utils;
 use util::test_utils;
 use util::test_utils::{panicking, TestChainMonitor};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
 use util::errors::APIError;
 use util::config::UserConfig;
 use util::ser::{ReadableArgs, Writeable};
@@ -46,6 +46,7 @@ use core::cell::RefCell;
 use alloc::rc::Rc;
 use sync::{Arc, Mutex};
 use core::mem;
+use core::iter::repeat;
 
 pub const CHAN_CONFIRM_DEPTH: u32 = 10;
 
@@ -352,6 +353,11 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                }
                        }
 
+                       let broadcaster = test_utils::TestBroadcaster {
+                               txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
+                               blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
+                       };
+
                        // Before using all the new monitors to check the watch outpoints, use the full set of
                        // them to ensure we can write and reload our ChannelManager.
                        {
@@ -367,20 +373,13 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                        keys_manager: self.keys_manager,
                                        fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
                                        chain_monitor: self.chain_monitor,
-                                       tx_broadcaster: &test_utils::TestBroadcaster {
-                                               txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
-                                               blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
-                                       },
+                                       tx_broadcaster: &broadcaster,
                                        logger: &self.logger,
                                        channel_monitors,
                                }).unwrap();
                        }
 
                        let persister = test_utils::TestPersister::new();
-                       let broadcaster = test_utils::TestBroadcaster {
-                               txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
-                               blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
-                       };
                        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
@@ -1187,7 +1186,7 @@ macro_rules! commitment_signed_dance {
                {
                        commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
                        if $fail_backwards {
-                               $crate::expect_pending_htlcs_forwardable!($node_a);
+                               expect_pending_htlcs_forwardable_and_htlc_handling_failed!($node_a, vec![$crate::util::events::HTLCDestination::NextHopChannel{ node_id: Some($node_b.node.get_our_node_id()), channel_id: $commitment_signed.channel_id }]);
                                check_added_monitors!($node_a, 1);
 
                                let channel_state = $node_a.node.channel_state.lock().unwrap();
@@ -1255,23 +1254,72 @@ macro_rules! get_route_and_payment_hash {
 }
 
 #[macro_export]
-/// Clears (and ignores) a PendingHTLCsForwardable event
-macro_rules! expect_pending_htlcs_forwardable_ignore {
-       ($node: expr) => {{
+macro_rules! expect_pending_htlcs_forwardable_conditions {
+       ($node: expr, $expected_failures: expr) => {{
+               let expected_failures = $expected_failures;
                let events = $node.node.get_and_clear_pending_events();
-               assert_eq!(events.len(), 1);
                match events[0] {
                        $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
+
+               let count = expected_failures.len() + 1;
+               assert_eq!(events.len(), count);
+
+               if expected_failures.len() > 0 {
+                       expect_htlc_handling_failed_destinations!(events, expected_failures)
+               }
+       }}
+}
+
+#[macro_export]
+macro_rules! expect_htlc_handling_failed_destinations {
+       ($events: expr, $expected_failures: expr) => {{
+               for event in $events {
+                       match event {
+                               $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
+                               $crate::util::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => {
+                                       assert!($expected_failures.contains(&failed_next_destination))
+                               },
+                               _ => panic!("Unexpected destination"),
+                       }
+               }
        }}
 }
 
+#[macro_export]
+/// Clears (and ignores) a PendingHTLCsForwardable event
+macro_rules! expect_pending_htlcs_forwardable_ignore {
+       ($node: expr) => {{
+               expect_pending_htlcs_forwardable_conditions!($node, vec![]);
+       }};
+}
+
+#[macro_export]
+/// Clears (and ignores) PendingHTLCsForwardable and HTLCHandlingFailed events
+macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore {
+       ($node: expr, $expected_failures: expr) => {{
+               expect_pending_htlcs_forwardable_conditions!($node, $expected_failures);
+       }};
+}
+
 #[macro_export]
 /// Handles a PendingHTLCsForwardable event
 macro_rules! expect_pending_htlcs_forwardable {
        ($node: expr) => {{
-               $crate::expect_pending_htlcs_forwardable_ignore!($node);
+               expect_pending_htlcs_forwardable_ignore!($node);
+               $node.node.process_pending_htlc_forwards();
+
+               // Ensure process_pending_htlc_forwards is idempotent.
+               $node.node.process_pending_htlc_forwards();
+       }};
+}
+
+#[macro_export]
+/// Handles a PendingHTLCsForwardable event and any HTLCHandlingFailed events
+macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed {
+       ($node: expr, $expected_failures: expr) => {{
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($node, $expected_failures);
                $node.node.process_pending_htlc_forwards();
 
                // Ensure process_pending_htlc_forwards is idempotent.
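
For context on how this new macro family is meant to be used: `expect_pending_htlcs_forwardable_conditions!` asserts exactly `expected_failures.len() + 1` pending events, i.e. one `PendingHTLCsForwardable` plus one `HTLCHandlingFailed` per expected destination. A short usage sketch modeled on the call sites later in this diff (node indices, `chan_2` and the payment hash are placeholders):

    // Receive-side failure: one PendingHTLCsForwardable plus one HTLCHandlingFailed expected.
    expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
        vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);

    // Forward-side failure: the destination names the next hop the HTLC could not reach.
    expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
        vec![HTLCDestination::NextHopChannel {
            node_id: Some(nodes[2].node.get_our_node_id()),
            channel_id: chan_2.2,
        }]);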
@@ -1816,7 +1864,8 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
        expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash);
-       expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap());
+       let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect();
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(expected_paths[0].last().unwrap(), expected_destinations);
 
        pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash);
 }
@@ -1857,7 +1906,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe
                                node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
                                commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node);
                                if !update_next_node {
-                                       expect_pending_htlcs_forwardable!(node);
+                                       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCDestination::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]);
                                }
                        }
                        let events = node.node.get_and_clear_pending_msg_events();
index e1050727c1e302560f9becdb2ce051c634705557..d7be8966250b66341b2580d6656a0067e4678aa8 100644 (file)
@@ -28,10 +28,10 @@ use routing::gossip::NetworkGraph;
 use routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
 use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction};
+use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::{byte_utils, test_utils};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use util::errors::APIError;
 use util::ser::{Writeable, ReadableArgs};
 use util::config::UserConfig;
@@ -54,6 +54,7 @@ use io;
 use prelude::*;
 use alloc::collections::BTreeSet;
 use core::default::Default;
+use core::iter::repeat;
 use sync::{Arc, Mutex};
 
 use ln::functional_test_utils::*;
@@ -1061,26 +1062,6 @@ fn fake_network_test() {
        fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
        claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
 
-       // Add a duplicate new channel from 2 to 4
-       let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
-
-       // Send some payments across both channels
-       let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
-       let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
-       let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
-
-
-       route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
-       let events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 0);
-       nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);
-
-       //TODO: Test that routes work again here as we've been notified that the channel is full
-
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
-
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
        check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
@@ -1094,9 +1075,6 @@ fn fake_network_test() {
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
        check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
-       close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -1152,7 +1130,7 @@ fn holding_cell_htlc_counting() {
        // We have to forward pending HTLCs twice - once tries to forward the payment forward (and
        // fails), the second will process the resulting failure and fail the HTLC backward.
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
 
        let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -2620,7 +2598,7 @@ fn claim_htlc_outputs_single_tx() {
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                let mut events = nodes[0].node.get_and_clear_pending_events();
                expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
-               match events[1] {
+               match events.last().unwrap() {
                        Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                        _ => panic!("Unexpected event"),
                }
@@ -2932,7 +2910,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.fail_htlc_backwards(&payment_hash);
        check_added_monitors!(nodes[2], 0);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
        check_added_monitors!(nodes[2], 1);
 
        let events = nodes[2].node.get_and_clear_pending_msg_events();
@@ -3004,7 +2982,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
                }
        }
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -3072,7 +3050,7 @@ fn test_simple_commitment_revoked_fail_backward() {
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -3135,7 +3113,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 
        nodes[2].node.fail_htlc_backwards(&first_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -3148,7 +3126,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        // Drop the last RAA from 3 -> 2
 
        nodes[2].node.fail_htlc_backwards(&second_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -3165,7 +3143,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        check_added_monitors!(nodes[2], 1);
 
        nodes[2].node.fail_htlc_backwards(&third_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -3197,11 +3175,15 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                // commitment transaction for nodes[0] until process_pending_htlc_forwards().
                check_added_monitors!(nodes[1], 1);
                let events = nodes[1].node.get_and_clear_pending_events();
-               assert_eq!(events.len(), 1);
+               assert_eq!(events.len(), 2);
                match events[0] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
+               match events[1] {
+                       Event::HTLCHandlingFailed { .. } => { },
+                       _ => panic!("Unexpected event"),
+               }
                // Deliberately don't process the pending fail-back so they all fail back at once after
                // block connection just like the !deliver_bs_raa case
        }
@@ -3215,7 +3197,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire
 
        let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 });
+       assert_eq!(events.len(), if deliver_bs_raa { 2 + (nodes.len() - 1) } else { 4 + nodes.len() });
        match events[0] {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
                _ => panic!("Unexepected event"),
@@ -3422,7 +3404,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
-       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 3);
        assert_eq!(node_txn[0], node_txn[1]);
 
@@ -4289,7 +4271,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
                connect_block(&nodes[1], &block);
        }
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
 
        check_added_monitors!(nodes[1], 1);
        let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -4353,7 +4335,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
        connect_blocks(&nodes[1], 1);
 
        if forwarded_htlc {
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
                check_added_monitors!(nodes[1], 1);
                let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(fail_commit.len(), 1);
@@ -4840,7 +4822,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
-       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
        assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis block us to do so at channel opening
@@ -5046,7 +5028,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
-       let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(revoked_htlc_txn.len(), 2);
        check_spends!(revoked_htlc_txn[0], chan_1.3);
        assert_eq!(revoked_htlc_txn[1].input.len(), 1);
@@ -5401,7 +5383,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
        mine_transaction(&nodes[1], &htlc_timeout_tx);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -5521,18 +5503,18 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
                &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
-       let chan = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
+       let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+       let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+       let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
+       let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
+       let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
 
        // Rebalance and check output sanity...
        send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
        send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
-       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
 
-       let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+       let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
        // 0th HTLC:
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 1st HTLC:
@@ -5567,8 +5549,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        // Double-check that six of the new HTLC were added
        // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
        // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
-       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1);
-       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
 
        // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
        // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
@@ -5577,7 +5559,14 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        nodes[4].node.fail_htlc_backwards(&payment_hash_5);
        nodes[4].node.fail_htlc_backwards(&payment_hash_6);
        check_added_monitors!(nodes[4], 0);
-       expect_pending_htlcs_forwardable!(nodes[4]);
+
+       let failed_destinations = vec![
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
        check_added_monitors!(nodes[4], 1);
 
        let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
@@ -5591,7 +5580,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        nodes[5].node.fail_htlc_backwards(&payment_hash_2);
        nodes[5].node.fail_htlc_backwards(&payment_hash_4);
        check_added_monitors!(nodes[5], 0);
-       expect_pending_htlcs_forwardable!(nodes[5]);
+
+       let failed_destinations_2 = vec![
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
        check_added_monitors!(nodes[5], 1);
 
        let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
@@ -5599,9 +5593,18 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
        commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
 
-       let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
-
-       expect_pending_htlcs_forwardable!(nodes[3]);
+       let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
+
+       // After the four removes above from nodes[4] and the two from nodes[5], nodes[3] should receive six HTLCHandlingFailed events
+       let failed_destinations_3 = vec![
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
        check_added_monitors!(nodes[3], 1);
        let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
        nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
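
As a side note, `expect_htlc_handling_failed_destinations!` only checks containment, so the order of `failed_destinations_3` does not matter, and the six repeated entries above could equivalently be built with the `repeat(..).take(n)` pattern used elsewhere in this diff. A sketch:

    // Equivalent construction of `failed_destinations_3` using core::iter::repeat.
    let failed_destinations_3: Vec<HTLCDestination> =
        repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }).take(4)
            .chain(repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }).take(2))
            .collect();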
@@ -5627,7 +5630,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        //
        // Alternatively, we may broadcast the previous commitment transaction, which should only
        // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
-       let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
+       let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
 
        if announce_latest {
                mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
@@ -5636,11 +5639,11 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        }
        let events = nodes[2].node.get_and_clear_pending_events();
        let close_event = if deliver_last_raa {
-               assert_eq!(events.len(), 2);
-               events[1].clone()
+               assert_eq!(events.len(), 2 + 6);
+               events.last().clone().unwrap()
        } else {
                assert_eq!(events.len(), 1);
-               events[0].clone()
+               events.last().clone().unwrap()
        };
        match close_event {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
@@ -5651,8 +5654,17 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        check_closed_broadcast!(nodes[2], true);
        if deliver_last_raa {
                expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+
+               let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
+               expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
        } else {
-               expect_pending_htlcs_forwardable!(nodes[2]);
+               let expected_destinations: Vec<HTLCDestination> = if announce_latest {
+                       repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
+               } else {
+                       repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
+               };
+
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
        }
        check_added_monitors!(nodes[2], 3);
 
@@ -6016,7 +6028,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        let htlc_value = if use_dust { 50000 } else { 3000000 };
        let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
        nodes[1].node.fail_htlc_backwards(&our_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
        check_added_monitors!(nodes[1], 1);
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -6476,7 +6488,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
 
        // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
        let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(process_htlc_forwards_event.len(), 1);
+       assert_eq!(process_htlc_forwards_event.len(), 2);
        match &process_htlc_forwards_event[0] {
                &Event::PendingHTLCsForwardable { .. } => {},
                _ => panic!("Unexpected event"),
@@ -7101,7 +7113,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+       let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
 
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
 
@@ -7149,7 +7161,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
 
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_4.len(), 1);
 
@@ -7193,7 +7205,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        // Fail one HTLC to prune it in the will-be-latest-local commitment tx
        nodes[1].node.fail_htlc_backwards(&payment_hash_2);
        check_added_monitors!(nodes[1], 0);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
        check_added_monitors!(nodes[1], 1);
 
        let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -7594,7 +7606,7 @@ fn test_check_htlc_underpaying() {
        // Note that we first have to wait a random delay before processing the receipt of the HTLC,
        // and then will wait a second random delay before failing the HTLC back:
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
 
        // Node 3 is expecting payment of 100_000 but received 10_000,
        // it should fail htlc like we didn't know the preimage.
@@ -7851,7 +7863,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
 
-       let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(revoked_htlc_txn.len(), 3);
        check_spends!(revoked_htlc_txn[1], chan.3);
 
@@ -7872,7 +7884,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
        let events = nodes[0].node.get_and_clear_pending_events();
        expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
-       match events[1] {
+       match events.last().unwrap() {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                _ => panic!("Unexpected event"),
        }
@@ -8112,22 +8124,26 @@ fn test_counterparty_raa_skip_no_crash() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 
-       let mut guard = nodes[0].node.channel_state.lock().unwrap();
-       let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
+       let per_commitment_secret;
+       let next_per_commitment_point;
+       {
+               let mut guard = nodes[0].node.channel_state.lock().unwrap();
+               let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
 
-       const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+               const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
-       // Make signer believe we got a counterparty signature, so that it allows the revocation
-       keys.get_enforcement_state().last_holder_commitment -= 1;
-       let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
+               // Make signer believe we got a counterparty signature, so that it allows the revocation
+               keys.get_enforcement_state().last_holder_commitment -= 1;
+               per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
 
-       // Must revoke without gaps
-       keys.get_enforcement_state().last_holder_commitment -= 1;
-       keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
+               // Must revoke without gaps
+               keys.get_enforcement_state().last_holder_commitment -= 1;
+               keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
 
-       keys.get_enforcement_state().last_holder_commitment -= 1;
-       let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
-               &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+               keys.get_enforcement_state().last_holder_commitment -= 1;
+               next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
+                       &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+       }
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
@@ -8148,19 +8164,19 @@ fn test_bump_txn_sanitize_tracking_maps() {
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
        // Lock HTLC in both directions
-       let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
-       route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
+       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
+       let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
 
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
 
        // Revoke local commitment tx
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
 
        // Broadcast set of revoked txn on A
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
-       expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
 
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -8335,19 +8351,19 @@ fn test_channel_update_has_correct_htlc_maximum_msat() {
 
        // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
        // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
-       assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_50_percent_msat));
+       assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
        // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
        // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
-       assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_30_percent_msat));
+       assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
 
        // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
        // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
        // `channel_value`.
-       assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+       assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
        // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
        // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
        // `channel_value`.
-       assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+       assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
 }
 
 #[test]
@@ -8469,12 +8485,12 @@ fn test_reject_funding_before_inbound_channel_accepted() {
        // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`
        // `handle_accept_channel`, which is required in order for `create_funding_transaction` to
        // succeed when `nodes[0]` is passed to it.
-       {
+       let accept_chan_msg = {
                let mut lock;
                let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id);
-               let accept_chan_msg = channel.get_accept_channel_message();
-               nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
-       }
+               channel.get_accept_channel_message()
+       };
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
 
        let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
 
@@ -8713,7 +8729,7 @@ fn test_bad_secret_hash() {
        // All the below cases should end up being handled exactly identically, so we macro the
        // resulting events.
        macro_rules! handle_unknown_invalid_payment_data {
-               () => {
+               ($payment_hash: expr) => {
                        check_added_monitors!(nodes[0], 1);
                        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                        let payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -8723,7 +8739,7 @@ fn test_bad_secret_hash() {
                        // We have to forward pending HTLCs once to process the receipt of the HTLC and then
                        // again to process the pending backwards-failure of the HTLC
                        expect_pending_htlcs_forwardable!(nodes[1]);
-                       expect_pending_htlcs_forwardable!(nodes[1]);
+                       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
                        check_added_monitors!(nodes[1], 1);
 
                        // We should fail the payment back
@@ -8744,17 +8760,17 @@ fn test_bad_secret_hash() {
 
        // Send a payment with the right payment hash but the wrong payment secret
        nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
-       handle_unknown_invalid_payment_data!();
+       handle_unknown_invalid_payment_data!(our_payment_hash);
        expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
 
        // Send a payment with a random payment hash, but the right payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
-       handle_unknown_invalid_payment_data!();
+       handle_unknown_invalid_payment_data!(random_payment_hash);
        expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
 
        // Send a payment with a random payment hash and random payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
-       handle_unknown_invalid_payment_data!();
+       handle_unknown_invalid_payment_data!(random_payment_hash);
        expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
 }
 
@@ -9580,7 +9596,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
                // additional block built on top of the current chain.
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
                        &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
                check_added_monitors!(nodes[1], 1);
 
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -9763,7 +9779,11 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
                // Now we go fail back the first HTLC from the user end.
                nodes[1].node.fail_htlc_backwards(&our_payment_hash);
 
-               expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               let expected_destinations = vec![
+                       HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+                       HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+               ];
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
                nodes[1].node.process_pending_htlc_forwards();
 
                check_added_monitors!(nodes[1], 1);
@@ -9780,7 +9800,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
                if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
        } else {
                // Let the second HTLC fail and claim the first
-               expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
                nodes[1].node.process_pending_htlc_forwards();
 
                check_added_monitors!(nodes[1], 1);
@@ -9822,7 +9842,7 @@ fn test_inconsistent_mpp_params() {
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
        create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
        create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
 
        let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
                .with_features(InvoiceFeatures::known());
@@ -9877,7 +9897,7 @@ fn test_inconsistent_mpp_params() {
        }
        expect_pending_htlcs_forwardable_ignore!(nodes[3]);
        nodes[3].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
        nodes[3].node.process_pending_htlc_forwards();
 
        check_added_monitors!(nodes[3], 1);
@@ -9886,7 +9906,7 @@ fn test_inconsistent_mpp_params() {
        nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
 
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
        check_added_monitors!(nodes[2], 1);
 
        let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
@@ -10006,7 +10026,11 @@ fn test_double_partial_claim() {
        connect_blocks(&nodes[3], TEST_FINAL_CLTV);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
 
-       expect_pending_htlcs_forwardable!(nodes[3]);
+       let failed_destinations = vec![
+               HTLCDestination::FailedPayment { payment_hash },
+               HTLCDestination::FailedPayment { payment_hash },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
 
        pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
 
index 28c86b79598cf70572069ce55561766a522b3130..af654837861fc2eeac3d1f7795453b1a206cae46 100644 (file)
@@ -43,7 +43,7 @@ pub mod channel;
 #[cfg(not(fuzzing))]
 pub(crate) mod channel;
 
-mod onion_utils;
+pub(crate) mod onion_utils;
 pub mod wire;
 
 // Older rustc (which we support) refuses to let us call the get_payment_preimage_hash!() macro
index 7c46f7b6d283a10d7ab4f5b97e45a611c1f2698b..4f36b9a88810aa5122c3347953028c6b31943a87 100644 (file)
@@ -15,7 +15,7 @@ use ln::channel;
 use ln::channelmanager::BREAKDOWN_TIMEOUT;
 use ln::features::InitFeatures;
 use ln::msgs::ChannelMessageHandler;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 
 use bitcoin::blockdata::script::Builder;
 use bitcoin::blockdata::opcodes;
@@ -74,7 +74,7 @@ fn chanmon_fail_from_stale_commitment() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
        check_added_monitors!(nodes[1], 1);
        let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
index 3e44cfcf024034a7de39a614e50cc5a974cfc632..05a336c18eff5634e0fee7f2cec8b86a66e02a89 100644 (file)
@@ -31,6 +31,8 @@ use bitcoin::blockdata::script::Script;
 use bitcoin::hash_types::{Txid, BlockHash};
 
 use ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
+use ln::onion_utils;
+use onion_message;
 
 use prelude::*;
 use core::fmt;
@@ -40,7 +42,7 @@ use io_extras::read_to_end;
 
 use util::events::MessageSendEventsProvider;
 use util::logger;
-use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt, Hostname};
+use util::ser::{LengthReadable, Readable, ReadableArgs, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt, Hostname};
 
 use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 
@@ -304,6 +306,14 @@ pub struct UpdateAddHTLC {
        pub(crate) onion_routing_packet: OnionPacket,
 }
 
+/// An onion message to be sent or received from a peer
+#[derive(Clone, Debug, PartialEq)]
+pub struct OnionMessage {
+       /// Used in decrypting the onion packet's payload.
+       pub blinding_point: PublicKey,
+       pub(crate) onion_routing_packet: onion_message::Packet,
+}
+
 /// An update_fulfill_htlc message to be sent or received from a peer
 #[derive(Clone, Debug, PartialEq)]
 pub struct UpdateFulfillHTLC {
@@ -647,8 +657,8 @@ pub struct UnsignedChannelUpdate {
        pub cltv_expiry_delta: u16,
        /// The minimum HTLC size incoming to sender, in milli-satoshi
        pub htlc_minimum_msat: u64,
-       /// Optionally, the maximum HTLC value incoming to sender, in milli-satoshi
-       pub htlc_maximum_msat: OptionalField<u64>,
+       /// The maximum HTLC value incoming to sender, in milli-satoshi. Used to be optional.
+       pub htlc_maximum_msat: u64,
        /// The base HTLC fee charged by sender, in milli-satoshi
        pub fee_base_msat: u32,
        /// The amount to fee multiplier, in micro-satoshi
@@ -993,6 +1003,18 @@ pub(crate) struct OnionPacket {
        pub(crate) hmac: [u8; 32],
 }
 
+impl onion_utils::Packet for OnionPacket {
+       type Data = onion_utils::FixedSizeOnionPacket;
+       fn new(pubkey: PublicKey, hop_data: onion_utils::FixedSizeOnionPacket, hmac: [u8; 32]) -> Self {
+               Self {
+                       version: 0,
+                       public_key: Ok(pubkey),
+                       hop_data: hop_data.0,
+                       hmac,
+               }
+       }
+}
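For illustration only (not part of this commit): because `OnionPacket` and the new helper types are `pub(crate)`, crate-internal code can exercise the trait constructor above roughly as follows, with dummy key material standing in for the values normally produced during onion construction.

use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};

// Dummy ephemeral key; in practice this comes from the onion construction code.
let secp_ctx = Secp256k1::new();
let ephemeral_pubkey = PublicKey::from_secret_key(
    &secp_ctx, &SecretKey::from_slice(&[42u8; 32]).unwrap());
let hop_data = onion_utils::FixedSizeOnionPacket([0u8; onion_utils::ONION_DATA_LEN]);
// Packet::new always yields a version-0 packet wrapping the raw 1300-byte hop data.
let packet = <OnionPacket as onion_utils::Packet>::new(ephemeral_pubkey, hop_data, [0u8; 32]);
assert_eq!(packet.version, 0);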
+
 impl PartialEq for OnionPacket {
        fn eq(&self, other: &OnionPacket) -> bool {
                for (i, j) in self.hop_data.iter().zip(other.hop_data.iter()) {
@@ -1327,6 +1349,29 @@ impl_writeable_msg!(UpdateAddHTLC, {
        onion_routing_packet
 }, {});
 
+impl Readable for OnionMessage {
+       fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let blinding_point: PublicKey = Readable::read(r)?;
+               let len: u16 = Readable::read(r)?;
+               let mut packet_reader = FixedLengthReader::new(r, len as u64);
+               let onion_routing_packet: onion_message::Packet = <onion_message::Packet as LengthReadable>::read(&mut packet_reader)?;
+               Ok(Self {
+                       blinding_point,
+                       onion_routing_packet,
+               })
+       }
+}
+
+impl Writeable for OnionMessage {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               self.blinding_point.write(w)?;
+               let onion_packet_len = self.onion_routing_packet.serialized_length();
+               (onion_packet_len as u16).write(w)?;
+               self.onion_routing_packet.write(w)?;
+               Ok(())
+       }
+}
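In other words, the wire framing of an onion message is the 33-byte compressed blinding point, a big-endian u16 length, then the onion packet bytes. A minimal sketch of that framing follows; the helper name and the `packet_bytes` input are illustrative stand-ins, not LDK API.

use bitcoin::secp256k1::PublicKey;

// Mirrors OnionMessage::write() above: blinding point, u16 length prefix, packet body.
fn frame_onion_message(blinding_point: &PublicKey, packet_bytes: &[u8]) -> Vec<u8> {
    let mut buf = Vec::with_capacity(33 + 2 + packet_bytes.len());
    buf.extend_from_slice(&blinding_point.serialize());
    buf.extend_from_slice(&(packet_bytes.len() as u16).to_be_bytes());
    buf.extend_from_slice(packet_bytes);
    buf
}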
+
 impl Writeable for FinalOnionHopData {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.payment_secret.0.write(w)?;
@@ -1372,6 +1417,14 @@ impl Writeable for OnionHopData {
        }
 }
 
+// ReadableArgs because we need onion_utils::decode_next_hop to accommodate payment packets and
+// onion message packets.
+impl ReadableArgs<()> for OnionHopData {
+       fn read<R: Read>(r: &mut R, _arg: ()) -> Result<Self, DecodeError> {
+               <Self as Readable>::read(r)
+       }
+}
+
 impl Readable for OnionHopData {
        fn read<R: Read>(mut r: &mut R) -> Result<Self, DecodeError> {
                use bitcoin::consensus::encode::{Decodable, Error, VarInt};
@@ -1514,14 +1567,12 @@ impl_writeable!(ChannelAnnouncement, {
 
 impl Writeable for UnsignedChannelUpdate {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
-               let mut message_flags: u8 = 0;
-               if let OptionalField::Present(_) = self.htlc_maximum_msat {
-                       message_flags = 1;
-               }
+               // `message_flags` used to indicate presence of `htlc_maximum_msat`, but was deprecated in the spec.
+               const MESSAGE_FLAGS: u8 = 1;
                self.chain_hash.write(w)?;
                self.short_channel_id.write(w)?;
                self.timestamp.write(w)?;
-               let all_flags = self.flags as u16 | ((message_flags as u16) << 8);
+               let all_flags = self.flags as u16 | ((MESSAGE_FLAGS as u16) << 8);
                all_flags.write(w)?;
                self.cltv_expiry_delta.write(w)?;
                self.htlc_minimum_msat.write(w)?;
@@ -1535,22 +1586,20 @@ impl Writeable for UnsignedChannelUpdate {
 
 impl Readable for UnsignedChannelUpdate {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
-               let has_htlc_maximum_msat;
                Ok(Self {
                        chain_hash: Readable::read(r)?,
                        short_channel_id: Readable::read(r)?,
                        timestamp: Readable::read(r)?,
                        flags: {
                                let flags: u16 = Readable::read(r)?;
-                               let message_flags = flags >> 8;
-                               has_htlc_maximum_msat = (message_flags as i32 & 1) == 1;
+                               // Note: we ignore the `message_flags` for now, since it was deprecated by the spec.
                                flags as u8
                        },
                        cltv_expiry_delta: Readable::read(r)?,
                        htlc_minimum_msat: Readable::read(r)?,
                        fee_base_msat: Readable::read(r)?,
                        fee_proportional_millionths: Readable::read(r)?,
-                       htlc_maximum_msat: if has_htlc_maximum_msat { Readable::read(r)? } else { OptionalField::Absent },
+                       htlc_maximum_msat: Readable::read(r)?,
                        excess_data: read_to_end(r)?,
                })
        }
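Since `htlc_maximum_msat` is now always present, the writer unconditionally sets bit 0 of `message_flags` (the high byte of the combined flags field) and the reader ignores that byte. A small sketch of the packing, using an illustrative `channel_flags` value:

const MESSAGE_FLAGS: u8 = 1;              // htlc_maximum_msat always present
let channel_flags: u8 = 0b0000_0001;      // e.g. direction bit set, channel enabled
let all_flags: u16 = channel_flags as u16 | ((MESSAGE_FLAGS as u16) << 8);
// Written big-endian, so message_flags hits the wire first: [0x01, 0x01].
assert_eq!(all_flags.to_be_bytes(), [0x01, 0x01]);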
@@ -2103,7 +2152,7 @@ mod tests {
                do_encoding_node_announcement(false, false, true, false, true, false, false, false);
        }
 
-       fn do_encoding_channel_update(direction: bool, disable: bool, htlc_maximum_msat: bool, excess_data: bool) {
+       fn do_encoding_channel_update(direction: bool, disable: bool, excess_data: bool) {
                let secp_ctx = Secp256k1::new();
                let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
@@ -2114,7 +2163,7 @@ mod tests {
                        flags: if direction { 1 } else { 0 } | if disable { 1 << 1 } else { 0 },
                        cltv_expiry_delta: 144,
                        htlc_minimum_msat: 1000000,
-                       htlc_maximum_msat: if htlc_maximum_msat { OptionalField::Present(131355275467161) } else { OptionalField::Absent },
+                       htlc_maximum_msat: 131355275467161,
                        fee_base_msat: 10000,
                        fee_proportional_millionths: 20,
                        excess_data: if excess_data { vec![0, 0, 0, 0, 59, 154, 202, 0] } else { Vec::new() }
@@ -2127,11 +2176,7 @@ mod tests {
                let mut target_value = hex::decode("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
                target_value.append(&mut hex::decode("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f").unwrap());
                target_value.append(&mut hex::decode("00083a840000034d013413a7").unwrap());
-               if htlc_maximum_msat {
-                       target_value.append(&mut hex::decode("01").unwrap());
-               } else {
-                       target_value.append(&mut hex::decode("00").unwrap());
-               }
+               target_value.append(&mut hex::decode("01").unwrap());
                target_value.append(&mut hex::decode("00").unwrap());
                if direction {
                        let flag = target_value.last_mut().unwrap();
@@ -2142,9 +2187,7 @@ mod tests {
                        *flag = *flag | 1 << 1;
                }
                target_value.append(&mut hex::decode("009000000000000f42400000271000000014").unwrap());
-               if htlc_maximum_msat {
-                       target_value.append(&mut hex::decode("0000777788889999").unwrap());
-               }
+               target_value.append(&mut hex::decode("0000777788889999").unwrap());
                if excess_data {
                        target_value.append(&mut hex::decode("000000003b9aca00").unwrap());
                }
@@ -2153,16 +2196,14 @@ mod tests {
 
        #[test]
        fn encoding_channel_update() {
-               do_encoding_channel_update(false, false, false, false);
-               do_encoding_channel_update(false, false, false, true);
-               do_encoding_channel_update(true, false, false, false);
-               do_encoding_channel_update(true, false, false, true);
-               do_encoding_channel_update(false, true, false, false);
-               do_encoding_channel_update(false, true, false, true);
-               do_encoding_channel_update(false, false, true, false);
-               do_encoding_channel_update(false, false, true, true);
-               do_encoding_channel_update(true, true, true, false);
-               do_encoding_channel_update(true, true, true, true);
+               do_encoding_channel_update(false, false, false);
+               do_encoding_channel_update(false, false, true);
+               do_encoding_channel_update(true, false, false);
+               do_encoding_channel_update(true, false, true);
+               do_encoding_channel_update(false, true, false);
+               do_encoding_channel_update(false, true, true);
+               do_encoding_channel_update(true, true, false);
+               do_encoding_channel_update(true, true, true);
        }
 
        fn do_encoding_open_channel(random_bit: bool, shutdown: bool, incl_chan_type: bool) {
index c66f45a44923b596e1e31ac0a08d2ee531826128..e2c6432337fcadf0e5a5a498132681f26a7f997f 100644 (file)
@@ -21,9 +21,9 @@ use routing::gossip::{NetworkUpdate, RoutingFees, NodeId};
 use routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop};
 use ln::features::{InitFeatures, InvoiceFeatures, NodeFeatures};
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, ChannelUpdate, OptionalField};
+use ln::msgs::{ChannelMessageHandler, ChannelUpdate};
 use ln::wire::Encode;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use util::ser::{ReadableArgs, Writeable, Writer};
 use util::{byte_utils, test_utils};
 use util::config::{UserConfig, ChannelConfig};
@@ -126,7 +126,7 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case:
                                expect_htlc_forward!(&nodes[2]);
                                expect_event!(&nodes[2], Event::PaymentReceived);
                                callback_node();
-                               expect_pending_htlcs_forwardable!(nodes[2]);
+                               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
                        }
 
                        let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -227,7 +227,7 @@ impl msgs::ChannelUpdate {
                                flags: 0,
                                cltv_expiry_delta: 0,
                                htlc_minimum_msat: 0,
-                               htlc_maximum_msat: OptionalField::Absent,
+                               htlc_maximum_msat: msgs::MAX_VALUE_MSAT,
                                fee_base_msat: 0,
                                fee_proportional_millionths: 0,
                                excess_data: vec![],
@@ -1036,7 +1036,7 @@ fn test_phantom_onion_hmac_failure() {
        };
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@ -1108,7 +1108,7 @@ fn test_phantom_invalid_onion_payload() {
        }
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@ -1164,7 +1164,7 @@ fn test_phantom_final_incorrect_cltv_expiry() {
        }
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@ -1210,7 +1210,7 @@ fn test_phantom_failure_too_low_cltv() {
 
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@ -1255,7 +1255,7 @@ fn test_phantom_failure_too_low_recv_amt() {
        nodes[1].node.process_pending_htlc_forwards();
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@ -1352,7 +1352,7 @@ fn test_phantom_failure_reject_payment() {
        nodes[1].node.process_pending_htlc_forwards();
        expect_payment_received!(nodes[1], payment_hash, payment_secret, recv_amt_msat);
        nodes[1].node.fail_htlc_backwards(&payment_hash);
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
 
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
index b223a344dbe204aafcd511c5efc2d4e035cf1ba9..f81c619d7353997c1019e60ac3b967da9b6ac1b0 100644 (file)
@@ -15,7 +15,7 @@ use routing::gossip::NetworkUpdate;
 use routing::router::RouteHop;
 use util::chacha20::{ChaCha20, ChaChaReader};
 use util::errors::{self, APIError};
-use util::ser::{Readable, Writeable, LengthCalculatingWriter};
+use util::ser::{Readable, ReadableArgs, Writeable, LengthCalculatingWriter};
 use util::logger::Logger;
 
 use bitcoin::hashes::{Hash, HashEngine};
@@ -30,21 +30,29 @@ use bitcoin::secp256k1;
 
 use prelude::*;
 use io::{Cursor, Read};
-use core::convert::TryInto;
+use core::convert::{AsMut, TryInto};
 use core::ops::Deref;
 
-pub(super) struct OnionKeys {
+pub(crate) struct OnionKeys {
        #[cfg(test)]
-       pub(super) shared_secret: SharedSecret,
+       pub(crate) shared_secret: SharedSecret,
        #[cfg(test)]
-       pub(super) blinding_factor: [u8; 32],
-       pub(super) ephemeral_pubkey: PublicKey,
-       pub(super) rho: [u8; 32],
-       pub(super) mu: [u8; 32],
+       pub(crate) blinding_factor: [u8; 32],
+       pub(crate) ephemeral_pubkey: PublicKey,
+       pub(crate) rho: [u8; 32],
+       pub(crate) mu: [u8; 32],
 }
 
 #[inline]
-pub(super) fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
+pub(crate) fn gen_rho_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
+       assert_eq!(shared_secret.len(), 32);
+       let mut hmac = HmacEngine::<Sha256>::new(&[0x72, 0x68, 0x6f]); // rho
+       hmac.input(&shared_secret);
+       Hmac::from_engine(hmac).into_inner()
+}
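Each onion key is an HMAC-SHA256 of the 32-byte ECDH shared secret keyed with a short ASCII label (`rho`, `mu`, `um`, `ammag` in BOLT 4); `rho` keys the ChaCha20 stream that encrypts hop payloads. A crate-internal sketch of how the new helper is used, with stand-in buffers for illustration:

use util::chacha20::ChaCha20;

let shared_secret = [0x11u8; 32];                 // stand-in for the ECDH shared secret
let rho = gen_rho_from_shared_secret(&shared_secret);
let mut payload = vec![0u8; 64];                  // stand-in for a serialized hop payload
let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);  // all-zero 8-byte nonce, per BOLT 4
chacha.process_in_place(&mut payload);            // encrypts (or decrypts) in place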
+
+#[inline]
+pub(crate) fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
        assert_eq!(shared_secret.len(), 32);
        ({
                let mut hmac = HmacEngine::<Sha256>::new(&[0x72, 0x68, 0x6f]); // rho
@@ -74,7 +82,7 @@ pub(super) fn gen_ammag_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
        Hmac::from_engine(hmac).into_inner()
 }
 
-pub(super) fn next_hop_packet_pubkey<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, mut packet_pubkey: PublicKey, packet_shared_secret: &[u8; 32]) -> Result<PublicKey, secp256k1::Error> {
+pub(crate) fn next_hop_packet_pubkey<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, mut packet_pubkey: PublicKey, packet_shared_secret: &[u8; 32]) -> Result<PublicKey, secp256k1::Error> {
        let blinding_factor = {
                let mut sha = Sha256::engine();
                sha.input(&packet_pubkey.serialize()[..]);
@@ -187,8 +195,8 @@ pub(super) fn build_onion_payloads(path: &Vec<RouteHop>, total_msat: u64, paymen
 pub(crate) const ONION_DATA_LEN: usize = 20*65;
 
 #[inline]
-fn shift_arr_right(arr: &mut [u8; ONION_DATA_LEN], amt: usize) {
-       for i in (amt..ONION_DATA_LEN).rev() {
+fn shift_slice_right(arr: &mut [u8], amt: usize) {
+       for i in (amt..arr.len()).rev() {
                arr[i] = arr[i-amt];
        }
        for i in 0..amt {
@@ -210,14 +218,15 @@ pub(super) fn route_size_insane(payloads: &Vec<msgs::OnionHopData>) -> bool {
        false
 }
 
-/// panics if route_size_insane(paylods)
+/// panics if route_size_insane(payloads)
 pub(super) fn construct_onion_packet(payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> msgs::OnionPacket {
        let mut packet_data = [0; ONION_DATA_LEN];
 
        let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
        chacha.process(&[0; ONION_DATA_LEN], &mut packet_data);
 
-       construct_onion_packet_with_init_noise(payloads, onion_keys, packet_data, associated_data)
+       construct_onion_packet_with_init_noise::<_, _>(
+               payloads, onion_keys, FixedSizeOnionPacket(packet_data), Some(associated_data))
 }
 
 #[cfg(test)]
@@ -229,12 +238,50 @@ pub(super) fn construct_onion_packet_bogus_hopdata<HD: Writeable>(payloads: Vec<
        let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
        chacha.process(&[0; ONION_DATA_LEN], &mut packet_data);
 
-       construct_onion_packet_with_init_noise(payloads, onion_keys, packet_data, associated_data)
+       construct_onion_packet_with_init_noise::<_, _>(
+               payloads, onion_keys, FixedSizeOnionPacket(packet_data), Some(associated_data))
+}
+
+/// Since onion message packets and onion payment packets have different lengths but are otherwise
+/// identical, we use this trait to allow `construct_onion_packet_with_init_noise` to return either
+/// type.
+pub(crate) trait Packet {
+       type Data: AsMut<[u8]>;
+       fn new(pubkey: PublicKey, hop_data: Self::Data, hmac: [u8; 32]) -> Self;
+}
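The trait abstracts only over the hop-data buffer type plus a constructor, so the same construction code can emit either a fixed 1300-byte payment packet or a variable-length onion message packet. A hypothetical implementation for a variable-length test packet (illustrative only, not part of this commit):

use bitcoin::secp256k1::PublicKey;

struct TestPacket { pubkey: PublicKey, hop_data: Vec<u8>, hmac: [u8; 32] }

impl Packet for TestPacket {
    // Variable-length buffer, like the new onion message packets use.
    type Data = Vec<u8>;
    fn new(pubkey: PublicKey, hop_data: Vec<u8>, hmac: [u8; 32]) -> Self {
        Self { pubkey, hop_data, hmac }
    }
}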
+
+// Needed for rustc versions older than 1.47 to avoid E0277: "arrays only have std trait
+// implementations for lengths 0..=32".
+pub(crate) struct FixedSizeOnionPacket(pub(crate) [u8; ONION_DATA_LEN]);
+
+impl AsMut<[u8]> for FixedSizeOnionPacket {
+       fn as_mut(&mut self) -> &mut [u8] {
+               &mut self.0
+       }
+}
+
+pub(crate) fn payloads_serialized_length<HD: Writeable>(payloads: &Vec<HD>) -> usize {
+       payloads.iter().map(|p| p.serialized_length() + 32 /* HMAC */).sum()
 }
 
-/// panics if route_size_insane(paylods)
-fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, mut packet_data: [u8; ONION_DATA_LEN], associated_data: &PaymentHash) -> msgs::OnionPacket {
+/// panics if payloads_serialized_length(payloads) > packet_data_len
+pub(crate) fn construct_onion_message_packet<HD: Writeable, P: Packet<Data = Vec<u8>>>(
+       payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], packet_data_len: usize) -> P
+{
+       let mut packet_data = vec![0; packet_data_len];
+
+       let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
+       chacha.process_in_place(&mut packet_data);
+
+       construct_onion_packet_with_init_noise::<_, _>(payloads, onion_keys, packet_data, None)
+}
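A caller sizes the buffer from the serialized payloads and passes that length in (real callers typically round up to a spec-defined onion message size). A hedged sketch of the call shape, assuming `payloads`, `onion_keys` and `prng_seed` were already built for an onion message and that the onion message packet type implements `Packet` with `Data = Vec<u8>`, as the new onion message code relies on:

let packet_len = payloads_serialized_length(&payloads);
let packet: onion_message::Packet =
    construct_onion_message_packet(payloads, onion_keys, prng_seed, packet_len);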
+
+/// panics if payloads_serialized_length(payloads) > packet_data.len()
+fn construct_onion_packet_with_init_noise<HD: Writeable, P: Packet>(
+       mut payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, mut packet_data: P::Data, associated_data: Option<&PaymentHash>) -> P
+{
        let filler = {
+               let packet_data = packet_data.as_mut();
                const ONION_HOP_DATA_LEN: usize = 65; // We may decrease this eventually after TLV is common
                let mut res = Vec::with_capacity(ONION_HOP_DATA_LEN * (payloads.len() - 1));
 
@@ -243,7 +290,7 @@ fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>,
                        if i == payloads.len() - 1 { break; }
 
                        let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
-                       for _ in 0..(ONION_DATA_LEN - pos) { // TODO: Batch this.
+                       for _ in 0..(packet_data.len() - pos) { // TODO: Batch this.
                                let mut dummy = [0; 1];
                                chacha.process_in_place(&mut dummy); // We don't have a seek function :(
                        }
@@ -251,7 +298,7 @@ fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>,
                        let mut payload_len = LengthCalculatingWriter(0);
                        payload.write(&mut payload_len).expect("Failed to calculate length");
                        pos += payload_len.0 + 32;
-                       assert!(pos <= ONION_DATA_LEN);
+                       assert!(pos <= packet_data.len());
 
                        res.resize(pos, 0u8);
                        chacha.process_in_place(&mut res);
@@ -263,29 +310,28 @@ fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>,
        for (i, (payload, keys)) in payloads.iter_mut().zip(onion_keys.iter()).rev().enumerate() {
                let mut payload_len = LengthCalculatingWriter(0);
                payload.write(&mut payload_len).expect("Failed to calculate length");
-               shift_arr_right(&mut packet_data, payload_len.0 + 32);
+
+               let packet_data = packet_data.as_mut();
+               shift_slice_right(packet_data, payload_len.0 + 32);
                packet_data[0..payload_len.0].copy_from_slice(&payload.encode()[..]);
                packet_data[payload_len.0..(payload_len.0 + 32)].copy_from_slice(&hmac_res);
 
                let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
-               chacha.process_in_place(&mut packet_data);
+               chacha.process_in_place(packet_data);
 
                if i == 0 {
                        packet_data[ONION_DATA_LEN - filler.len()..ONION_DATA_LEN].copy_from_slice(&filler[..]);
                }
 
                let mut hmac = HmacEngine::<Sha256>::new(&keys.mu);
-               hmac.input(&packet_data);
-               hmac.input(&associated_data.0[..]);
+               hmac.input(packet_data);
+               if let Some(associated_data) = associated_data {
+                       hmac.input(&associated_data.0[..]);
+               }
                hmac_res = Hmac::from_engine(hmac).into_inner();
        }
 
-       msgs::OnionPacket {
-               version: 0,
-               public_key: Ok(onion_keys.first().unwrap().ephemeral_pubkey),
-               hop_data: packet_data,
-               hmac: hmac_res,
-       }
+       P::new(onion_keys.first().unwrap().ephemeral_pubkey, packet_data, hmac_res)
 }
 
 /// Encrypts a failure packet. raw_packet can either be a
@@ -534,7 +580,50 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(secp_ctx: &
        } else { unreachable!(); }
 }
 
-/// Data decrypted from the onion payload.
+/// An input used when decoding an onion packet.
+pub(crate) trait DecodeInput {
+       type Arg;
+       /// If Some, this is the input when checking the hmac of the onion packet.
+       fn payment_hash(&self) -> Option<&PaymentHash>;
+       /// Read argument when decrypting our hop payload.
+       fn read_arg(self) -> Self::Arg;
+}
+
+impl DecodeInput for PaymentHash {
+       type Arg = ();
+       fn payment_hash(&self) -> Option<&PaymentHash> {
+               Some(self)
+       }
+       fn read_arg(self) -> Self::Arg { () }
+}
+
+impl DecodeInput for SharedSecret {
+       type Arg = SharedSecret;
+       fn payment_hash(&self) -> Option<&PaymentHash> {
+               None
+       }
+       fn read_arg(self) -> Self::Arg { self }
+}
+
+/// Allows `decode_next_hop` to return the next hop packet bytes for either payments or onion
+/// message forwards.
+pub(crate) trait NextPacketBytes: AsMut<[u8]> {
+       fn new(len: usize) -> Self;
+}
+
+impl NextPacketBytes for FixedSizeOnionPacket {
+       fn new(_len: usize) -> Self {
+               Self([0 as u8; ONION_DATA_LEN])
+       }
+}
+
+impl NextPacketBytes for Vec<u8> {
+       fn new(len: usize) -> Self {
+               vec![0 as u8; len]
+       }
+}
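This lets `decode_next_hop` allocate the forward-onion buffer without knowing whether it is handling a fixed-size payment onion or a variable-length onion message, roughly as follows (lengths are illustrative):

// Payment onions always get a 1300-byte (ONION_DATA_LEN) buffer, whatever len is passed...
let payment_buf = <FixedSizeOnionPacket as NextPacketBytes>::new(1300);
assert_eq!(payment_buf.0.len(), ONION_DATA_LEN);
// ...while onion message forwards allocate exactly the incoming hop_data length.
let om_buf = <Vec<u8> as NextPacketBytes>::new(32_768);
assert_eq!(om_buf.len(), 32_768);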
+
+/// Data decrypted from a payment's onion payload.
 pub(crate) enum Hop {
        /// This onion payload was for us, not for forwarding to a next-hop. Contains information for
        /// verifying the incoming payment.
@@ -546,11 +635,12 @@ pub(crate) enum Hop {
                /// HMAC of the next hop's onion packet.
                next_hop_hmac: [u8; 32],
                /// Bytes of the onion packet we're forwarding.
-               new_packet_bytes: [u8; 20*65],
+               new_packet_bytes: [u8; ONION_DATA_LEN],
        },
 }
 
 /// Error returned when we fail to decode the onion packet.
+#[derive(Debug)]
 pub(crate) enum OnionDecodeErr {
        /// The HMAC of the onion packet did not match the hop data.
        Malformed {
@@ -564,11 +654,27 @@ pub(crate) enum OnionDecodeErr {
        },
 }
 
-pub(crate) fn decode_next_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash) -> Result<Hop, OnionDecodeErr> {
+pub(crate) fn decode_next_payment_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash) -> Result<Hop, OnionDecodeErr> {
+       match decode_next_hop(shared_secret, hop_data, hmac_bytes, payment_hash) {
+               Ok((next_hop_data, None)) => Ok(Hop::Receive(next_hop_data)),
+               Ok((next_hop_data, Some((next_hop_hmac, FixedSizeOnionPacket(new_packet_bytes))))) => {
+                       Ok(Hop::Forward {
+                               next_hop_data,
+                               next_hop_hmac,
+                               new_packet_bytes
+                       })
+               },
+               Err(e) => Err(e),
+       }
+}
+
+pub(crate) fn decode_next_hop<D: DecodeInput, R: ReadableArgs<D::Arg>, N: NextPacketBytes>(shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], decode_input: D) -> Result<(R, Option<([u8; 32], N)>), OnionDecodeErr> {
        let (rho, mu) = gen_rho_mu_from_shared_secret(&shared_secret);
        let mut hmac = HmacEngine::<Sha256>::new(&mu);
        hmac.input(hop_data);
-       hmac.input(&payment_hash.0[..]);
+       if let Some(payment_hash) = decode_input.payment_hash() {
+               hmac.input(&payment_hash.0[..]);
+       }
        if !fixed_time_eq(&Hmac::from_engine(hmac).into_inner(), &hmac_bytes) {
                return Err(OnionDecodeErr::Malformed {
                        err_msg: "HMAC Check failed",
@@ -578,7 +684,7 @@ pub(crate) fn decode_next_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_byt
 
        let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
        let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&hop_data[..]) };
-       match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) {
+       match R::read(&mut chacha_stream, decode_input.read_arg()) {
                Err(err) => {
                        let error_code = match err {
                                msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
@@ -616,10 +722,11 @@ pub(crate) fn decode_next_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_byt
                                        chacha_stream.read_exact(&mut next_bytes).unwrap();
                                        assert_ne!(next_bytes[..], [0; 32][..]);
                                }
-                               return Ok(Hop::Receive(msg));
+                               return Ok((msg, None)); // We are the final destination for this packet
                        } else {
-                               let mut new_packet_bytes = [0; 20*65];
-                               let read_pos = chacha_stream.read(&mut new_packet_bytes).unwrap();
+                               let mut new_packet_bytes = N::new(hop_data.len());
+                               let read_pos = hop_data.len() - chacha_stream.read.position() as usize;
+                               chacha_stream.read_exact(&mut new_packet_bytes.as_mut()[..read_pos]).unwrap();
                                #[cfg(debug_assertions)]
                                {
                                        // Check two things:
@@ -631,12 +738,8 @@ pub(crate) fn decode_next_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_byt
                                }
                                // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
                                // fill the onion hop data we'll forward to our next-hop peer.
-                               chacha_stream.chacha.process_in_place(&mut new_packet_bytes[read_pos..]);
-                               return Ok(Hop::Forward {
-                                       next_hop_data: msg,
-                                       next_hop_hmac: hmac,
-                                       new_packet_bytes,
-                               })
+                               chacha_stream.chacha.process_in_place(&mut new_packet_bytes.as_mut()[read_pos..]);
+                               return Ok((msg, Some((hmac, new_packet_bytes)))) // This packet needs forwarding
                        }
                },
        }
@@ -775,7 +878,7 @@ mod tests {
                        },
                );
 
-               let packet = super::construct_onion_packet_with_init_noise(payloads, onion_keys, [0; super::ONION_DATA_LEN], &PaymentHash([0x42; 32]));
+               let packet: msgs::OnionPacket = super::construct_onion_packet_with_init_noise::<_, _>(payloads, onion_keys, super::FixedSizeOnionPacket([0; super::ONION_DATA_LEN]), Some(&PaymentHash([0x42; 32])));
                // Just check the final packet encoding, as it includes all the per-hop vectors in it
                // anyway...
                assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap());
@@ -854,7 +957,7 @@ mod tests {
                        }),
                );
 
-               let packet = super::construct_onion_packet_with_init_noise(payloads, onion_keys, [0; super::ONION_DATA_LEN], &PaymentHash([0x42; 32]));
+               let packet: msgs::OnionPacket = super::construct_onion_packet_with_init_noise::<_, _>(payloads, onion_keys, super::FixedSizeOnionPacket([0; super::ONION_DATA_LEN]), Some(&PaymentHash([0x42; 32])));
                // Just check the final packet encoding, as it includes all the per-hop vectors in it
                // anyway...
                assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a71a060daf367132b378b3a3883c0e2c0e026b8900b2b5cdbc784e1a3bb913f88a9c50f7d61ab590531cf08000178a333a347f8b4072ed056f820f77774345e183a342ec4729f3d84accf515e88adddb85ecc08daba68404bae9a8e8d7178977d7094a1ae549f89338c0777551f874159eb42d3a59fb9285ad4e24883f27de23942ec966611e99bee1cee503455be9e8e642cef6cef7b9864130f692283f8a973d47a8f1c1726b6e59969385975c766e35737c8d76388b64f748ee7943ffb0e2ee45c57a1abc40762ae598723d21bd184e2b338f68ebff47219357bd19cd7e01e2337b806ef4d717888e129e59cd3dc31e6201ccb2fd6d7499836f37a993262468bcb3a4dcd03a22818aca49c6b7b9b8e9e870045631d8e039b066ff86e0d1b7291f71cefa7264c70404a8e538b566c17ccc5feab231401e6c08a01bd5edfc1aa8e3e533b96e82d1f91118d508924b923531929aea889fcdf050597c681185f336b1da63b0939aa2b7c50b21b5eb7b6ad66c81fab98a3cdf73f658149e7e9ced4edde5d38c9b8f92e16f6b4ab13d7fca6a0e4ecc9f9de611a90da6e99c39551094c56e3196f282c5dffd9fc4b2fc12f3bca8e6fe47eb45fbdd3be21a8a8d200797eae3c9a0497132f92410d804977408494dff49dd3d8bce248e0b74fd9e6f0f7102c25ddfa02bd9ad9f746abbfa337ef811d5345a9e16b60de1767b209645ba40bd1f9a5f75bc04feca9b27c5554be4fe83fac2cb83aa447a817bb85ae966c68b420063833fada375e2f515965e687a45699632902672c654d1d18d7bcbf55e8fa57f63f2da449f8e1e606e8722df081e5f193fc4179feb99ad22819afdeef211f7c54afdba92aeef0c00b7bc2b65a4813c01f907a8377585708f2d4c940a25328e585714c8ded0a9a4d7a6de1027c1cb7a0198cd3db68b58c0704dfd0cfbe624e9cd18cc0ae5d96697bb476708b9ee0403d211e64e0d5a7683a7a9a140c02f0ff1c6e67a302941b4052bdea8a63e70a3ad62c5b89c698f1fd3c7685cb49705096cad702d02d93bcb1c27a409f4c9bddec001205ca4a2740f19b50900be81c7e847f1a863deea8d35701f1355cad8db57b1d4eb2ab4e29587734785abfb46ddede71928213d7d089dfdeda052827f459f1688cc0935bd47e7bcec27427c8376dcce7e22699567c0d145f8a7db33f6758815f1f15f9f7a9760dec4f34ae095edda4c64e9735bdd029c4e32c2ee31ba47ec5e6bdb97813d52dbd15b4e0b7a2c7f790ae64104d99f38c127f0a093288fa34144adb16b8968d4fa7656fcec99de8503dd46d3b03620a71c7cd085364abd30dccf7fbda25a1cdc102600149c9af1c97aa0372cd2e1909f28ac5c686f432b310e79528c9b8b9e8f314c1e74621ce6308ad2278b81d460892e0d9dd38b7c76d58be6dfd10ae7583ee1e7ef5b3f6f78dc60af0950df1b00cc55b6d178ba2e476bea0eaeef49323b83f05804159e7aef4eed4cc60dd07be76f067dfd0bcfb0b806b69ba921336a20c43c832d0cab8fa3ddeb29e3bf07b0d98a112eb07802756235a49d44a8b82a950d84e95e01971f0e106ccb337f07384e21620e0ad39e16ed9edca123226cf55ac44f449eeb53e38a7f27d101806e4823e4efcc887414240ee6826c4a5cb1c6443ad36ebf905a435c1d9054e54173911b17b5b40f60b3d9fd5f12eac54ca1e20191f5f18544d5fd3d665e9bcef96fb44b76110aa64d9db4c86c9513cbdad546538e8aec521fbe83ceac5e74a15629f1ed0b870a1d0d1e5680b6d6100d1bd3f3b9043bd35b8919c4088f1949b8be89e4701eb870f8ed64fafa446c78df3ea").unwrap());
index d3feb96df111766d9625953a8765c1b0a2ff9e8b..785edecebbe3d0b7eddb0f0f6de2011f895fdde1 100644 (file)
@@ -21,7 +21,7 @@ use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::ChannelMessageHandler;
 use routing::router::{PaymentParameters, get_route};
-use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use util::test_utils;
 use util::errors::APIError;
 use util::enforcing_trait_impls::EnforcingSigner;
@@ -43,7 +43,7 @@ fn retry_single_path_payment() {
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-       let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
        // Rebalance to find a route
        send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
 
@@ -62,7 +62,7 @@ fn retry_single_path_payment() {
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(&nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -120,10 +120,10 @@ fn mpp_retry() {
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
-       let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let chan_4_id = create_announced_chan_between_nodes(&nodes, 3, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+       let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
+       let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2, InitFeatures::known(), InitFeatures::known());
        // Rebalance
        send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
 
@@ -131,11 +131,11 @@ fn mpp_retry() {
        let path = route.paths[0].clone();
        route.paths.push(path);
        route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
-       route.paths[0][0].short_channel_id = chan_1_id;
-       route.paths[0][1].short_channel_id = chan_3_id;
+       route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
+       route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
        route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
-       route.paths[1][0].short_channel_id = chan_2_id;
-       route.paths[1][1].short_channel_id = chan_4_id;
+       route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
+       route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;
 
        // Initiate the MPP payment.
        let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
@@ -165,7 +165,7 @@ fn mpp_retry() {
 
        // Attempt to forward the payment and complete the 2nd path's failure.
        expect_pending_htlcs_forwardable!(&nodes[2]);
-       expect_pending_htlcs_forwardable!(&nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]);
        let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -206,20 +206,20 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
-       let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+       let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
+       let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
 
        let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
        let path = route.paths[0].clone();
        route.paths.push(path);
        route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
-       route.paths[0][0].short_channel_id = chan_1_id;
-       route.paths[0][1].short_channel_id = chan_3_id;
+       route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
+       route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
        route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
-       route.paths[1][0].short_channel_id = chan_2_id;
-       route.paths[1][1].short_channel_id = chan_4_id;
+       route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
+       route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;
 
        // Initiate the MPP payment.
        let _ = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
@@ -237,7 +237,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
                }
 
                // Failed HTLC from node 3 -> 1
-               expect_pending_htlcs_forwardable!(nodes[3]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
                let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
                assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
                nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
@@ -245,7 +245,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
                commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);
 
                // Failed HTLC from node 1 -> 0
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]);
                let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
@@ -280,7 +280,7 @@ fn retry_expired_payment() {
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-       let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
        // Rebalance to find a route
        send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
 
@@ -299,7 +299,7 @@ fn retry_expired_payment() {
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(&nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -803,7 +803,7 @@ fn test_fulfill_restart_failure() {
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        nodes[1].node.fail_htlc_backwards(&payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        check_added_monitors!(nodes[1], 1);
        let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
index 2a026821f8adf91a9f0d2ba9bfdacbc861e1db48..d34fdfeb52a1ddcb887bfd42c4cfcddac8e7d0e9 100644 (file)
@@ -622,8 +622,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// peer using the init message.
        /// The user should pass the remote network address of the host they are connected to.
        ///
-       /// Note that if an Err is returned here you MUST NOT call socket_disconnected for the new
-       /// descriptor but must disconnect the connection immediately.
+       /// If an `Err` is returned here you must disconnect the connection immediately.
        ///
        /// Returns a small number of bytes to send to the remote node (currently always 50).
        ///
@@ -671,9 +670,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// The user should pass the remote network address of the host they are connected to.
        ///
        /// May refuse the connection by returning an Err, but will never write bytes to the remote end
-       /// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
-       /// call socket_disconnected for the new descriptor but must disconnect the connection
-       /// immediately.
+       /// (outbound connector always speaks first). If an `Err` is returned here you must disconnect
+       /// the connection immediately.
        ///
        /// Panics if descriptor is duplicative with some other descriptor which has not yet been
        /// [`socket_disconnected()`].
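A hedged sketch of the caller-side contract these docs describe; `socket`, `send_to_remote` and `disconnect_socket` are stand-ins for whatever transport the user drives, and error handling details are elided:

match peer_manager.new_outbound_connection(their_node_id, descriptor.clone(), remote_addr) {
    // On success, write the returned handshake bytes (currently always 50) to the peer.
    Ok(initial_bytes) => send_to_remote(&mut socket, &initial_bytes),
    // On Err, simply close the underlying connection right away.
    Err(_) => disconnect_socket(&mut socket),
}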
@@ -1926,11 +1924,18 @@ mod tests {
                peer_a.new_inbound_connection(fd_a.clone(), None).unwrap();
                assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
                peer_a.process_events();
-               assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+
+               let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+               assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+
                peer_b.process_events();
-               assert_eq!(peer_a.read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+               let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+               assert_eq!(peer_a.read_event(&mut fd_a, &b_data).unwrap(), false);
+
                peer_a.process_events();
-               assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+               let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+               assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+
                (fd_a.clone(), fd_b.clone())
        }
 
@@ -2084,14 +2089,16 @@ mod tests {
 
                assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false);
                peers[0].process_events();
-               assert_eq!(peers[1].read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+               let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+               assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false);
                peers[1].process_events();
 
                // ...but if we get a second timer tick, we should disconnect the peer
                peers[0].timer_tick_occurred();
                assert_eq!(peers[0].peers.read().unwrap().len(), 0);
 
-               assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err());
+               let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+               assert!(peers[0].read_event(&mut fd_a, &b_data).is_err());
        }
 
        #[test]
index 508ba414037107b260938b6c82bdbf2ad107dd4a..ed0311945e6de9d491986906f5be72c0cacb07ae 100644 (file)
@@ -19,10 +19,10 @@ use routing::gossip::RoutingFees;
 use routing::router::{PaymentParameters, RouteHint, RouteHintHop};
 use ln::features::{InitFeatures, InvoiceFeatures, ChannelTypeFeatures};
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ChannelUpdate, ErrorAction};
+use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ChannelUpdate, ErrorAction};
 use ln::wire::Encode;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use util::config::UserConfig;
 use util::ser::{Writeable, ReadableArgs};
 use util::test_utils;
@@ -478,7 +478,7 @@ fn test_scid_alias_returned() {
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0, InitFeatures::known(), InitFeatures::known());
-       create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000, 0, InitFeatures::known(), InitFeatures::known());
+       let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000, 0, InitFeatures::known(), InitFeatures::known());
 
        let last_hop = nodes[2].node.list_usable_channels();
        let mut hop_hints = vec![RouteHint(vec![RouteHintHop {
@@ -508,7 +508,7 @@ fn test_scid_alias_returned() {
        commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true);
 
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]);
        check_added_monitors!(nodes[1], 1);
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -523,7 +523,7 @@ fn test_scid_alias_returned() {
                flags: 1,
                cltv_expiry_delta: accept_forward_cfg.channel_config.cltv_expiry_delta,
                htlc_minimum_msat: 1_000,
-               htlc_maximum_msat: OptionalField::Present(1_000_000), // Defaults to 10% of the channel value
+               htlc_maximum_msat: 1_000_000, // Defaults to 10% of the channel value
                fee_base_msat: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_base_msat,
                fee_proportional_millionths: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_proportional_millionths,
                excess_data: Vec::new(),
index 29349392f761438b311cca2eb98f49ba14bd338f..fab6962e514957209b832c786916575be95fed4c 100644 (file)
@@ -16,7 +16,7 @@ use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs};
 use ln::features::InitFeatures;
 use ln::msgs::ChannelMessageHandler;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use util::test_utils;
 use util::ser::{ReadableArgs, Writeable};
 
@@ -82,7 +82,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
                check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
-               let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
                assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
                check_spends!(node_2_commitment_txn[1], chan_2.3);
@@ -147,7 +147,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                        txdata: vec![],
                };
                connect_block(&nodes[1], &block);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        }
 
        check_added_monitors!(nodes[1], 1);
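Both hunks above replace a bare second `expect_pending_htlcs_forwardable!` with `expect_pending_htlcs_forwardable_and_htlc_handling_failed!`, asserting the `HTLCDestination` reported when a forward cannot be completed. Outside the test macros, this surfaces to users as the `Event::HTLCHandlingFailed` event introduced alongside `HTLCDestination`; a hedged sketch of consuming it (field and variant names as used in the tests above, event-loop wiring elided):

```rust
// Sketch only; assumes the event loop (e.g. a background processor) is set up elsewhere.
use lightning::util::events::{Event, HTLCDestination};

fn on_event(event: &Event) {
    if let Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination } = event {
        match failed_next_destination {
            HTLCDestination::NextHopChannel { node_id, channel_id } => {
                // An HTLC received over `prev_channel_id` could not be forwarded over
                // `channel_id` toward `node_id` (e.g. the next channel was closed or
                // lacked capacity).
                let _ = (prev_channel_id, node_id, channel_id);
            },
            _ => {
                // Other variants cover unknown next hops and failed receives.
            },
        }
    }
}
```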
diff --git a/lightning/src/onion_message/blinded_route.rs b/lightning/src/onion_message/blinded_route.rs
new file mode 100644 (file)
index 0000000..d18372e
--- /dev/null
@@ -0,0 +1,153 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Creating blinded routes and related utilities live here.
+
+use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
+
+use chain::keysinterface::{KeysInterface, Sign};
+use super::utils;
+use util::chacha20poly1305rfc::ChaChaPolyWriteAdapter;
+use util::ser::{VecWriter, Writeable, Writer};
+
+use core::iter::FromIterator;
+use io;
+use prelude::*;
+
+/// Onion messages can be sent to and received over blinded routes, which serve to hide the identity
+/// of the recipient.
+pub struct BlindedRoute {
+       /// To send to a blinded route, the sender first finds a route to the unblinded
+       /// `introduction_node_id`, which can unblind its [`encrypted_payload`] to find out the onion
+       /// message's next hop and forward it along.
+       ///
+       /// [`encrypted_payload`]: BlindedHop::encrypted_payload
+       pub(super) introduction_node_id: PublicKey,
+       /// Used by the introduction node to decrypt its [`encrypted_payload`] to forward the onion
+       /// message.
+       ///
+       /// [`encrypted_payload`]: BlindedHop::encrypted_payload
+       pub(super) blinding_point: PublicKey,
+       /// The hops composing the blinded route.
+       pub(super) blinded_hops: Vec<BlindedHop>,
+}
+
+/// Used to construct the blinded hops portion of a blinded route. These hops cannot be identified
+/// by outside observers and thus can be used to hide the identity of the recipient.
+pub struct BlindedHop {
+       /// The blinded node id of this hop in a blinded route.
+       pub(super) blinded_node_id: PublicKey,
+       /// The encrypted payload intended for this hop in a blinded route.
+       // The node sending to this blinded route will later encode this payload into the onion packet for
+       // this hop.
+       pub(super) encrypted_payload: Vec<u8>,
+}
+
+impl BlindedRoute {
+       /// Create a blinded route to be forwarded along `node_pks`. The last node pubkey in `node_pks`
+       /// will be the destination node.
+       ///
+       /// Errors if fewer than two hops are provided or if any of the provided `node_pks` is invalid.
+       //  TODO: make all payloads the same size with padding + add dummy hops
+       pub fn new<Signer: Sign, K: KeysInterface, T: secp256k1::Signing + secp256k1::Verification>
+               (node_pks: &[PublicKey], keys_manager: &K, secp_ctx: &Secp256k1<T>) -> Result<Self, ()>
+       {
+               if node_pks.len() < 2 { return Err(()) }
+               let blinding_secret_bytes = keys_manager.get_secure_random_bytes();
+               let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
+               let introduction_node_id = node_pks[0];
+
+               Ok(BlindedRoute {
+                       introduction_node_id,
+                       blinding_point: PublicKey::from_secret_key(secp_ctx, &blinding_secret),
+                       blinded_hops: blinded_hops(secp_ctx, node_pks, &blinding_secret).map_err(|_| ())?,
+               })
+       }
+}
+
+/// Construct blinded hops for the given `unblinded_path`.
+fn blinded_hops<T: secp256k1::Signing + secp256k1::Verification>(
+       secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], session_priv: &SecretKey
+) -> Result<Vec<BlindedHop>, secp256k1::Error> {
+       let mut blinded_hops = Vec::with_capacity(unblinded_path.len());
+
+       let mut prev_ss_and_blinded_node_id = None;
+       utils::construct_keys_callback(secp_ctx, unblinded_path, None, session_priv, |blinded_node_id, _, _, encrypted_payload_ss, unblinded_pk, _| {
+               if let Some((prev_ss, prev_blinded_node_id)) = prev_ss_and_blinded_node_id {
+                       if let Some(pk) = unblinded_pk {
+                               let payload = ForwardTlvs {
+                                       next_node_id: pk,
+                                       next_blinding_override: None,
+                               };
+                               blinded_hops.push(BlindedHop {
+                                       blinded_node_id: prev_blinded_node_id,
+                                       encrypted_payload: encrypt_payload(payload, prev_ss),
+                               });
+                       } else { debug_assert!(false); }
+               }
+               prev_ss_and_blinded_node_id = Some((encrypted_payload_ss, blinded_node_id));
+       })?;
+
+       if let Some((final_ss, final_blinded_node_id)) = prev_ss_and_blinded_node_id {
+               let final_payload = ReceiveTlvs { path_id: None };
+               blinded_hops.push(BlindedHop {
+                       blinded_node_id: final_blinded_node_id,
+                       encrypted_payload: encrypt_payload(final_payload, final_ss),
+               });
+       } else { debug_assert!(false) }
+
+       Ok(blinded_hops)
+}
+
+/// Encrypt TLV payload to be used as a [`BlindedHop::encrypted_payload`].
+fn encrypt_payload<P: Writeable>(payload: P, encrypted_tlvs_ss: [u8; 32]) -> Vec<u8> {
+       let mut writer = VecWriter(Vec::new());
+       let write_adapter = ChaChaPolyWriteAdapter::new(encrypted_tlvs_ss, &payload);
+       write_adapter.write(&mut writer).expect("In-memory writes cannot fail");
+       writer.0
+}
+
+/// TLVs to encode in an intermediate onion message packet's hop data. When provided in a blinded
+/// route, they are encoded into [`BlindedHop::encrypted_payload`].
+pub(crate) struct ForwardTlvs {
+       /// The node id of the next hop in the onion message's path.
+       pub(super) next_node_id: PublicKey,
+       /// Senders to a blinded route use this value to concatenate the route they find to the
+       /// introduction node with the blinded route.
+       pub(super) next_blinding_override: Option<PublicKey>,
+}
+
+/// Similar to [`ForwardTlvs`], but these TLVs are for the final node.
+pub(crate) struct ReceiveTlvs {
+       /// If `path_id` is `Some`, it is used to identify the blinded route that this onion message is
+       /// being sent to. This is useful for receivers to check that said blinded route is being used in
+       /// the right context.
+       pub(super) path_id: Option<[u8; 32]>,
+}
+
+impl Writeable for ForwardTlvs {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               // TODO: write padding
+               encode_tlv_stream!(writer, {
+                       (4, self.next_node_id, required),
+                       (8, self.next_blinding_override, option)
+               });
+               Ok(())
+       }
+}
+
+impl Writeable for ReceiveTlvs {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               // TODO: write padding
+               encode_tlv_stream!(writer, {
+                       (6, self.path_id, option),
+               });
+               Ok(())
+       }
+}
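As the constructor above shows, building a blinded route needs only the unblinded pubkeys of the hops (the last one being the recipient), a `KeysInterface` for randomness, and a secp256k1 context. A minimal sketch of calling it, assuming the `onion_message` module is exported and using placeholder keys; it mirrors the usage in the functional tests and `OnionMessenger` docs below:

```rust
use bitcoin::secp256k1::{PublicKey, Secp256k1};
use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
use lightning::onion_message::BlindedRoute;

// `hop1` and `hop2` are intermediate nodes; `me` is this node's unblinded pubkey and
// becomes the final (blinded) hop of the route.
fn build_route(hop1: PublicKey, hop2: PublicKey, me: PublicKey) -> Result<BlindedRoute, ()> {
    let seed = [42u8; 32]; // placeholder entropy
    let keys_manager = KeysManager::new(&seed, 42, 42);
    let secp_ctx = Secp256k1::new();
    BlindedRoute::new::<InMemorySigner, _, _>(&[hop1, hop2, me], &keys_manager, &secp_ctx)
}
```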
diff --git a/lightning/src/onion_message/functional_tests.rs b/lightning/src/onion_message/functional_tests.rs
new file mode 100644 (file)
index 0000000..695064e
--- /dev/null
@@ -0,0 +1,142 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Onion message testing and test utilities live here.
+
+use chain::keysinterface::{KeysInterface, Recipient};
+use super::{BlindedRoute, Destination, OnionMessenger, SendError};
+use util::enforcing_trait_impls::EnforcingSigner;
+use util::test_utils;
+
+use bitcoin::network::constants::Network;
+use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+
+use sync::Arc;
+
+struct MessengerNode {
+       keys_manager: Arc<test_utils::TestKeysInterface>,
+       messenger: OnionMessenger<EnforcingSigner, Arc<test_utils::TestKeysInterface>, Arc<test_utils::TestLogger>>,
+       logger: Arc<test_utils::TestLogger>,
+}
+
+impl MessengerNode {
+       fn get_node_pk(&self) -> PublicKey {
+               let secp_ctx = Secp256k1::new();
+               PublicKey::from_secret_key(&secp_ctx, &self.keys_manager.get_node_secret(Recipient::Node).unwrap())
+       }
+}
+
+fn create_nodes(num_messengers: u8) -> Vec<MessengerNode> {
+       let mut res = Vec::new();
+       for i in 0..num_messengers {
+               let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
+               let seed = [i as u8; 32];
+               let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
+               res.push(MessengerNode {
+                       keys_manager: keys_manager.clone(),
+                       messenger: OnionMessenger::new(keys_manager, logger.clone()),
+                       logger,
+               });
+       }
+       res
+}
+
+fn pass_along_path(mut path: Vec<MessengerNode>, expected_path_id: Option<[u8; 32]>) {
+       let mut prev_node = path.remove(0);
+       let num_nodes = path.len();
+       for (idx, node) in path.into_iter().enumerate() {
+               let events = prev_node.messenger.release_pending_msgs();
+               assert_eq!(events.len(), 1);
+               let onion_msg = {
+                       let msgs = events.get(&node.get_node_pk()).unwrap();
+                       assert_eq!(msgs.len(), 1);
+                       msgs[0].clone()
+               };
+               node.messenger.handle_onion_message(&prev_node.get_node_pk(), &onion_msg);
+               if idx == num_nodes - 1 {
+                       node.logger.assert_log_contains(
+                               "lightning::onion_message::messenger".to_string(),
+                               format!("Received an onion message with path_id: {:02x?}", expected_path_id).to_string(), 1);
+               }
+               prev_node = node;
+       }
+}
+
+#[test]
+fn one_hop() {
+       let nodes = create_nodes(2);
+
+       nodes[0].messenger.send_onion_message(&[], Destination::Node(nodes[1].get_node_pk())).unwrap();
+       pass_along_path(nodes, None);
+}
+
+#[test]
+fn two_unblinded_hops() {
+       let nodes = create_nodes(3);
+
+       nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk()], Destination::Node(nodes[2].get_node_pk())).unwrap();
+       pass_along_path(nodes, None);
+}
+
+#[test]
+fn two_unblinded_two_blinded() {
+       let nodes = create_nodes(5);
+
+       let secp_ctx = Secp256k1::new();
+       let blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[3].get_node_pk(), nodes[4].get_node_pk()], &*nodes[4].keys_manager, &secp_ctx).unwrap();
+
+       nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], Destination::BlindedRoute(blinded_route)).unwrap();
+       pass_along_path(nodes, None);
+}
+
+#[test]
+fn three_blinded_hops() {
+       let nodes = create_nodes(4);
+
+       let secp_ctx = Secp256k1::new();
+       let blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
+
+       nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route)).unwrap();
+       pass_along_path(nodes, None);
+}
+
+#[test]
+fn too_big_packet_error() {
+       // Make sure we error as expected if a packet is too big to send.
+       let nodes = create_nodes(1);
+
+       let hop_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
+       let secp_ctx = Secp256k1::new();
+       let hop_node_id = PublicKey::from_secret_key(&secp_ctx, &hop_secret);
+
+       let hops = [hop_node_id; 400];
+       let err = nodes[0].messenger.send_onion_message(&hops, Destination::Node(hop_node_id)).unwrap_err();
+       assert_eq!(err, SendError::TooBigPacket);
+}
+
+#[test]
+fn invalid_blinded_route_error() {
+       // Make sure we error as expected if a provided blinded route has 0 or 1 hops.
+       let mut nodes = create_nodes(3);
+       let (node1, node2, node3) = (nodes.remove(0), nodes.remove(0), nodes.remove(0));
+
+       // 0 hops
+       let secp_ctx = Secp256k1::new();
+       let mut blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[node2.get_node_pk(), node3.get_node_pk()], &*node3.keys_manager, &secp_ctx).unwrap();
+       blinded_route.blinded_hops.clear();
+       let err = node1.messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route)).unwrap_err();
+       assert_eq!(err, SendError::TooFewBlindedHops);
+
+       // 1 hop
+       let mut blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[node2.get_node_pk(), node3.get_node_pk()], &*node3.keys_manager, &secp_ctx).unwrap();
+       blinded_route.blinded_hops.remove(0);
+       assert_eq!(blinded_route.blinded_hops.len(), 1);
+       let err = node1.messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route)).unwrap_err();
+       assert_eq!(err, SendError::TooFewBlindedHops);
+}
diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs
new file mode 100644 (file)
index 0000000..7eba3cd
--- /dev/null
@@ -0,0 +1,383 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
+//! more information.
+
+use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::hmac::{Hmac, HmacEngine};
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
+
+use chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
+use ln::msgs;
+use ln::onion_utils;
+use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
+use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
+use super::utils;
+use util::logger::Logger;
+
+use core::ops::Deref;
+use sync::{Arc, Mutex};
+use prelude::*;
+
+/// A sender, receiver and forwarder of onion messages. In upcoming releases, this object will be
+/// used to retrieve invoices and fulfill invoice requests from [offers]. Currently, only sending
+/// and receiving empty onion messages is supported.
+///
+/// # Example
+///
+//  Needs to be `ignore` until the `onion_message` module is made public, otherwise this is a test
+//  failure.
+/// ```ignore
+/// # extern crate bitcoin;
+/// # use bitcoin::hashes::_export::_core::time::Duration;
+/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+/// # use lightning::chain::keysinterface::{InMemorySigner, KeysManager, KeysInterface};
+/// # use lightning::onion_message::{BlindedRoute, Destination, OnionMessenger};
+/// # use lightning::util::logger::{Logger, Record};
+/// # use std::sync::Arc;
+/// # struct FakeLogger {};
+/// # impl Logger for FakeLogger {
+/// #     fn log(&self, record: &Record) { unimplemented!() }
+/// # }
+/// # let seed = [42u8; 32];
+/// # let time = Duration::from_secs(123456);
+/// # let keys_manager = KeysManager::new(&seed, time.as_secs(), time.subsec_nanos());
+/// # let logger = Arc::new(FakeLogger {});
+/// # let node_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
+/// # let secp_ctx = Secp256k1::new();
+/// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
+/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1, hop_node_id1);
+/// # let destination_node_id = hop_node_id1;
+/// #
+/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
+/// // ChannelManager.
+/// let onion_messenger = OnionMessenger::new(&keys_manager, logger);
+///
+/// // Send an empty onion message to a node id.
+/// let intermediate_hops = [hop_node_id1, hop_node_id2];
+/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id));
+///
+/// // Create a blinded route to yourself, for someone to send an onion message to.
+/// # let your_node_id = hop_node_id1;
+/// let hops = [hop_node_id3, hop_node_id4, your_node_id];
+/// let blinded_route = BlindedRoute::new::<InMemorySigner, _, _>(&hops, &keys_manager, &secp_ctx).unwrap();
+///
+/// // Send an empty onion message to a blinded route.
+/// # let intermediate_hops = [hop_node_id1, hop_node_id2];
+/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route));
+/// ```
+///
+/// [offers]: <https://github.com/lightning/bolts/pull/798>
+/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
+pub struct OnionMessenger<Signer: Sign, K: Deref, L: Deref>
+       where K::Target: KeysInterface<Signer = Signer>,
+             L::Target: Logger,
+{
+       keys_manager: K,
+       logger: L,
+       pending_messages: Mutex<HashMap<PublicKey, Vec<msgs::OnionMessage>>>,
+       secp_ctx: Secp256k1<secp256k1::All>,
+       // Coming soon:
+       // invoice_handler: InvoiceHandler,
+       // custom_handler: CustomHandler, // handles custom onion messages
+}
+
+/// The destination of an onion message.
+pub enum Destination {
+       /// We're sending this onion message to a node.
+       Node(PublicKey),
+       /// We're sending this onion message to a blinded route.
+       BlindedRoute(BlindedRoute),
+}
+
+impl Destination {
+       pub(super) fn num_hops(&self) -> usize {
+               match self {
+                       Destination::Node(_) => 1,
+                       Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => blinded_hops.len(),
+               }
+       }
+}
+
+/// Errors that may occur when [sending an onion message].
+///
+/// [sending an onion message]: OnionMessenger::send_onion_message
+#[derive(Debug, PartialEq)]
+pub enum SendError {
+       /// Errored computing onion message packet keys.
+       Secp256k1(secp256k1::Error),
+       /// Because implementations such as Eclair will drop onion messages where the message packet
+       /// exceeds 32834 bytes, we refuse to send messages where the packet exceeds this size.
+       TooBigPacket,
+       /// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
+       /// blinded hops.
+       TooFewBlindedHops,
+}
+
+impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
+       where K::Target: KeysInterface<Signer = Signer>,
+             L::Target: Logger,
+{
+       /// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
+       /// their respective handlers.
+       pub fn new(keys_manager: K, logger: L) -> Self {
+               let mut secp_ctx = Secp256k1::new();
+               secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
+               OnionMessenger {
+                       keys_manager,
+                       pending_messages: Mutex::new(HashMap::new()),
+                       secp_ctx,
+                       logger,
+               }
+       }
+
+       /// Send an empty onion message to `destination`, routing it through `intermediate_nodes`.
+       /// See [`OnionMessenger`] for example usage.
+       pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination) -> Result<(), SendError> {
+               if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
+                       if blinded_hops.len() < 2 {
+                               return Err(SendError::TooFewBlindedHops);
+                       }
+               }
+               let blinding_secret_bytes = self.keys_manager.get_secure_random_bytes();
+               let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
+               let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
+                       (intermediate_nodes[0], PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret))
+               } else {
+                       match destination {
+                               Destination::Node(pk) => (pk, PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret)),
+                               Destination::BlindedRoute(BlindedRoute { introduction_node_id, blinding_point, .. }) =>
+                                       (introduction_node_id, blinding_point),
+                       }
+               };
+               let (packet_payloads, packet_keys) = packet_payloads_and_keys(
+                       &self.secp_ctx, intermediate_nodes, destination, &blinding_secret)
+                       .map_err(|e| SendError::Secp256k1(e))?;
+
+               let prng_seed = self.keys_manager.get_secure_random_bytes();
+               let onion_packet = construct_onion_message_packet(
+                       packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
+
+               let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
+               let pending_msgs = pending_per_peer_msgs.entry(introduction_node_id).or_insert(Vec::new());
+               pending_msgs.push(
+                       msgs::OnionMessage {
+                               blinding_point,
+                               onion_routing_packet: onion_packet,
+                       }
+               );
+               Ok(())
+       }
+
+       /// Handle an incoming onion message. Currently, if a message was destined for us we will log, but
+       /// soon we'll delegate the onion message to a handler that can generate invoices or send
+       /// payments.
+       pub fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
+               let control_tlvs_ss = match self.keys_manager.ecdh(Recipient::Node, &msg.blinding_point, None) {
+                       Ok(ss) => ss,
+                       Err(e) => {
+                               log_error!(self.logger, "Failed to retrieve node secret: {:?}", e);
+                               return
+                       }
+               };
+               let onion_decode_ss = {
+                       let blinding_factor = {
+                               let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
+                               hmac.input(control_tlvs_ss.as_ref());
+                               Hmac::from_engine(hmac).into_inner()
+                       };
+                       match self.keys_manager.ecdh(Recipient::Node, &msg.onion_routing_packet.public_key,
+                               Some(&blinding_factor))
+                       {
+                               Ok(ss) => ss.secret_bytes(),
+                               Err(()) => {
+                                       log_trace!(self.logger, "Failed to compute onion packet shared secret");
+                                       return
+                               }
+                       }
+               };
+               match onion_utils::decode_next_hop(onion_decode_ss, &msg.onion_routing_packet.hop_data[..],
+                       msg.onion_routing_packet.hmac, control_tlvs_ss)
+               {
+                       Ok((Payload::Receive {
+                               control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id })
+                       }, None)) => {
+                               log_info!(self.logger, "Received an onion message with path_id: {:02x?}", path_id);
+                       },
+                       Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
+                               next_node_id, next_blinding_override
+                       })), Some((next_hop_hmac, new_packet_bytes)))) => {
+                               // TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
+                               // blinded hop and this onion message is destined for us. In this situation, we should keep
+                               // unwrapping the onion layers to get to the final payload. Since we don't have the option
+                               // of creating blinded routes with dummy hops currently, we should be ok to not handle this
+                               // for now.
+                               let new_pubkey = match onion_utils::next_hop_packet_pubkey(&self.secp_ctx, msg.onion_routing_packet.public_key, &onion_decode_ss) {
+                                       Ok(pk) => pk,
+                                       Err(e) => {
+                                               log_trace!(self.logger, "Failed to compute next hop packet pubkey: {}", e);
+                                               return
+                                       }
+                               };
+                               let outgoing_packet = Packet {
+                                       version: 0,
+                                       public_key: new_pubkey,
+                                       hop_data: new_packet_bytes,
+                                       hmac: next_hop_hmac,
+                               };
+
+                               let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
+                               let pending_msgs = pending_per_peer_msgs.entry(next_node_id).or_insert(Vec::new());
+                               pending_msgs.push(
+                                       msgs::OnionMessage {
+                                               blinding_point: match next_blinding_override {
+                                                       Some(blinding_point) => blinding_point,
+                                                       None => {
+                                                               let blinding_factor = {
+                                                                       let mut sha = Sha256::engine();
+                                                                       sha.input(&msg.blinding_point.serialize()[..]);
+                                                                       sha.input(control_tlvs_ss.as_ref());
+                                                                       Sha256::from_engine(sha).into_inner()
+                                                               };
+                                                               let mut next_blinding_point = msg.blinding_point;
+                                                               if let Err(e) = next_blinding_point.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
+                                                                       log_trace!(self.logger, "Failed to compute next blinding point: {}", e);
+                                                                       return
+                                                               }
+                                                               next_blinding_point
+                                                       },
+                                               },
+                                               onion_routing_packet: outgoing_packet,
+                                       },
+                               );
+                       },
+                       Err(e) => {
+                               log_trace!(self.logger, "Errored decoding onion message packet: {:?}", e);
+                       },
+                       _ => {
+                               log_trace!(self.logger, "Received bogus onion message packet, either the sender encoded a final hop as a forwarding hop or vice versa");
+                       },
+               };
+       }
+
+       #[cfg(test)]
+       pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, Vec<msgs::OnionMessage>> {
+               let mut pending_msgs = self.pending_messages.lock().unwrap();
+               let mut msgs = HashMap::new();
+               core::mem::swap(&mut *pending_msgs, &mut msgs);
+               msgs
+       }
+}
+
+// TODO: parameterize the below Simple* types with OnionMessenger and handle the messages it
+// produces
+/// Useful for simplifying the parameters of [`SimpleArcChannelManager`] and
+/// [`SimpleArcPeerManager`]. See their docs for more details.
+///
+/// [`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
+/// [`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
+pub type SimpleArcOnionMessenger<L> = OnionMessenger<InMemorySigner, Arc<KeysManager>, Arc<L>>;
+/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
+/// [`SimpleRefPeerManager`]. See their docs for more details.
+///
+/// [`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
+/// [`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
+pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<InMemorySigner, &'a KeysManager, &'b L>;
+
+/// Construct onion packet payloads and keys for sending an onion message along the given
+/// `unblinded_path` to the given `destination`.
+fn packet_payloads_and_keys<T: secp256k1::Signing + secp256k1::Verification>(
+       secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination, session_priv: &SecretKey
+) -> Result<(Vec<(Payload, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
+       let num_hops = unblinded_path.len() + destination.num_hops();
+       let mut payloads = Vec::with_capacity(num_hops);
+       let mut onion_packet_keys = Vec::with_capacity(num_hops);
+
+       let (mut intro_node_id_blinding_pt, num_blinded_hops) = if let Destination::BlindedRoute(BlindedRoute {
+               introduction_node_id, blinding_point, blinded_hops }) = &destination {
+               (Some((*introduction_node_id, *blinding_point)), blinded_hops.len()) } else { (None, 0) };
+       let num_unblinded_hops = num_hops - num_blinded_hops;
+
+       let mut unblinded_path_idx = 0;
+       let mut blinded_path_idx = 0;
+       let mut prev_control_tlvs_ss = None;
+       utils::construct_keys_callback(secp_ctx, unblinded_path, Some(destination), session_priv, |_, onion_packet_ss, ephemeral_pubkey, control_tlvs_ss, unblinded_pk_opt, enc_payload_opt| {
+               if num_unblinded_hops != 0 && unblinded_path_idx < num_unblinded_hops {
+                       if let Some(ss) = prev_control_tlvs_ss.take() {
+                               payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(
+                                       ForwardTlvs {
+                                               next_node_id: unblinded_pk_opt.unwrap(),
+                                               next_blinding_override: None,
+                                       }
+                               )), ss));
+                       }
+                       prev_control_tlvs_ss = Some(control_tlvs_ss);
+                       unblinded_path_idx += 1;
+               } else if let Some((intro_node_id, blinding_pt)) = intro_node_id_blinding_pt.take() {
+                       if let Some(control_tlvs_ss) = prev_control_tlvs_ss.take() {
+                               payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
+                                       next_node_id: intro_node_id,
+                                       next_blinding_override: Some(blinding_pt),
+                               })), control_tlvs_ss));
+                       }
+                       if let Some(encrypted_payload) = enc_payload_opt {
+                               payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(encrypted_payload)),
+                                       control_tlvs_ss));
+                       } else { debug_assert!(false); }
+                       blinded_path_idx += 1;
+               } else if blinded_path_idx < num_blinded_hops - 1 && enc_payload_opt.is_some() {
+                       payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(enc_payload_opt.unwrap())),
+                               control_tlvs_ss));
+                       blinded_path_idx += 1;
+               } else if let Some(encrypted_payload) = enc_payload_opt {
+                       payloads.push((Payload::Receive {
+                               control_tlvs: ReceiveControlTlvs::Blinded(encrypted_payload),
+                       }, control_tlvs_ss));
+               }
+
+               let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(onion_packet_ss.as_ref());
+               onion_packet_keys.push(onion_utils::OnionKeys {
+                       #[cfg(test)]
+                       shared_secret: onion_packet_ss,
+                       #[cfg(test)]
+                       blinding_factor: [0; 32],
+                       ephemeral_pubkey,
+                       rho,
+                       mu,
+               });
+       })?;
+
+       if let Some(control_tlvs_ss) = prev_control_tlvs_ss {
+               payloads.push((Payload::Receive {
+                       control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, })
+               }, control_tlvs_ss));
+       }
+
+       Ok((payloads, onion_packet_keys))
+}
+
+/// Errors if the serialized payload size exceeds onion_message::BIG_PACKET_HOP_DATA_LEN
+fn construct_onion_message_packet(payloads: Vec<(Payload, [u8; 32])>, onion_keys: Vec<onion_utils::OnionKeys>, prng_seed: [u8; 32]) -> Result<Packet, ()> {
+       // Spec rationale:
+       // "`len` allows larger messages to be sent than the standard 1300 bytes allowed for an HTLC
+       // onion, but this should be used sparingly as it reduces the anonymity set, hence the
+       // recommendation that it either look like an HTLC onion, or if larger, be a fixed size."
+       let payloads_ser_len = onion_utils::payloads_serialized_length(&payloads);
+       let hop_data_len = if payloads_ser_len <= SMALL_PACKET_HOP_DATA_LEN {
+               SMALL_PACKET_HOP_DATA_LEN
+       } else if payloads_ser_len <= BIG_PACKET_HOP_DATA_LEN {
+               BIG_PACKET_HOP_DATA_LEN
+       } else { return Err(()) };
+
+       Ok(onion_utils::construct_onion_message_packet::<_, _>(
+               payloads, onion_keys, prng_seed, hop_data_len))
+}
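A worked check of the size selection in `construct_onion_message_packet` above: the packet framing adds 1 (version) + 33 (public key) + 32 (HMAC) = 66 bytes around `hop_data`, so the two permitted on-the-wire sizes are 1300 + 66 = 1366 bytes (the familiar HTLC-onion-like size) and 32768 + 66 = 32834 bytes, the exact ceiling cited in `SendError::TooBigPacket`. Illustrative only; the real constants are private to this module:

```rust
const SMALL_PACKET_HOP_DATA_LEN: usize = 1300;  // mirrors packet.rs
const BIG_PACKET_HOP_DATA_LEN: usize = 32768;   // mirrors packet.rs
const PACKET_OVERHEAD: usize = 1 + 33 + 32;     // version + public_key + hmac

fn main() {
    assert_eq!(SMALL_PACKET_HOP_DATA_LEN + PACKET_OVERHEAD, 1366);
    // 32834 is the limit some implementations (e.g. Eclair) enforce when relaying.
    assert_eq!(BIG_PACKET_HOP_DATA_LEN + PACKET_OVERHEAD, 32834);
}
```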
diff --git a/lightning/src/onion_message/mod.rs b/lightning/src/onion_message/mod.rs
new file mode 100644 (file)
index 0000000..2e23b76
--- /dev/null
@@ -0,0 +1,33 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Onion Messages: sending, receiving, forwarding, and ancillary utilities live here
+//!
+//! Onion messages are multi-purpose messages sent between peers over the lightning network. In the
+//! near future, they will be used to communicate invoices for [offers], unlocking use cases such as
+//! static invoices, refunds and proof of payer. Further, you will be able to accept payments
+//! without revealing your node id through the use of [blinded routes].
+//!
+//! LDK sends and receives onion messages via the [`OnionMessenger`]. See its documentation for more
+//! information on its usage.
+//!
+//! [offers]: <https://github.com/lightning/bolts/pull/798>
+//! [blinded routes]: crate::onion_message::BlindedRoute
+
+mod blinded_route;
+mod messenger;
+mod packet;
+mod utils;
+#[cfg(test)]
+mod functional_tests;
+
+// Re-export structs so they can be imported with just the `onion_message::` module prefix.
+pub use self::blinded_route::{BlindedRoute, BlindedHop};
+pub use self::messenger::{Destination, OnionMessenger, SendError, SimpleArcOnionMessenger, SimpleRefOnionMessenger};
+pub(crate) use self::packet::Packet;
diff --git a/lightning/src/onion_message/packet.rs b/lightning/src/onion_message/packet.rs
new file mode 100644 (file)
index 0000000..a3414d8
--- /dev/null
@@ -0,0 +1,250 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Structs and enums useful for constructing and reading an onion message packet.
+
+use bitcoin::secp256k1::PublicKey;
+use bitcoin::secp256k1::ecdh::SharedSecret;
+
+use ln::msgs::DecodeError;
+use ln::onion_utils;
+use super::blinded_route::{ForwardTlvs, ReceiveTlvs};
+use util::chacha20poly1305rfc::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter};
+use util::ser::{FixedLengthReader, LengthRead, LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer};
+
+use core::cmp;
+use io::{self, Read};
+use prelude::*;
+
+// Per the spec, an onion message packet's `hop_data` field length should be
+// SMALL_PACKET_HOP_DATA_LEN if it fits, else BIG_PACKET_HOP_DATA_LEN if it fits.
+pub(super) const SMALL_PACKET_HOP_DATA_LEN: usize = 1300;
+pub(super) const BIG_PACKET_HOP_DATA_LEN: usize = 32768;
+
+#[derive(Clone, Debug, PartialEq)]
+pub(crate) struct Packet {
+       pub(super) version: u8,
+       pub(super) public_key: PublicKey,
+       // Unlike the onion packets used for payments, onion message packets can have payloads greater
+       // than 1300 bytes.
+       // TODO: if 1300 ends up being the most common size, optimize this to be:
+       // enum { ThirteenHundred([u8; 1300]), VarLen(Vec<u8>) }
+       pub(super) hop_data: Vec<u8>,
+       pub(super) hmac: [u8; 32],
+}
+
+impl onion_utils::Packet for Packet {
+       type Data = Vec<u8>;
+       fn new(public_key: PublicKey, hop_data: Vec<u8>, hmac: [u8; 32]) -> Packet {
+               Self {
+                       version: 0,
+                       public_key,
+                       hop_data,
+                       hmac,
+               }
+       }
+}
+
+impl Writeable for Packet {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               self.version.write(w)?;
+               self.public_key.write(w)?;
+               w.write_all(&self.hop_data)?;
+               self.hmac.write(w)?;
+               Ok(())
+       }
+}
+
+impl LengthReadable for Packet {
+       fn read<R: LengthRead>(r: &mut R) -> Result<Self, DecodeError> {
+               const READ_BUFFER_SIZE: usize = 4096;
+
+               let version = Readable::read(r)?;
+               let public_key = Readable::read(r)?;
+
+               let mut hop_data = Vec::new();
+               let hop_data_len = r.total_bytes() as usize - 66; // 1 (version) + 33 (pubkey) + 32 (HMAC) = 66
+               let mut read_idx = 0;
+               while read_idx < hop_data_len {
+                       let mut read_buffer = [0; READ_BUFFER_SIZE];
+                       let read_amt = cmp::min(hop_data_len - read_idx, READ_BUFFER_SIZE);
+                       r.read_exact(&mut read_buffer[..read_amt]).map_err(|_| DecodeError::ShortRead)?;
+                       hop_data.extend_from_slice(&read_buffer[..read_amt]);
+                       read_idx += read_amt;
+               }
+
+               let hmac = Readable::read(r)?;
+               Ok(Packet {
+                       version,
+                       public_key,
+                       hop_data,
+                       hmac,
+               })
+       }
+}
+
+/// Onion message payloads contain "control" TLVs and "data" TLVs. Control TLVs are used to route
+/// the onion message from hop to hop and for path verification, whereas data TLVs contain the onion
+/// message content itself, such as an invoice request.
+pub(super) enum Payload {
+       /// This payload is for an intermediate hop.
+       Forward(ForwardControlTlvs),
+       /// This payload is for the final hop.
+       Receive {
+               control_tlvs: ReceiveControlTlvs,
+               // Coming soon:
+               // reply_path: Option<BlindedRoute>,
+               // message: Message,
+       }
+}
+
+// Coming soon:
+// enum Message {
+//     InvoiceRequest(InvoiceRequest),
+//     Invoice(Invoice),
+//     InvoiceError(InvoiceError),
+//     CustomMessage<T>,
+// }
+
+/// Forward control TLVs in their blinded and unblinded form.
+pub(super) enum ForwardControlTlvs {
+       /// If we're sending to a blinded route, the node that constructed the blinded route has provided
+       /// this hop's control TLVs, already encrypted into bytes.
+       Blinded(Vec<u8>),
+       /// If we're constructing an onion message hop through an intermediate unblinded node, we'll need
+       /// to construct the intermediate hop's control TLVs in their unblinded state to avoid encoding
+       /// them into an intermediate Vec. See [`super::blinded_route::ForwardTlvs`] for more info.
+       Unblinded(ForwardTlvs),
+}
+
+/// Receive control TLVs in their blinded and unblinded form.
+pub(super) enum ReceiveControlTlvs {
+       /// See [`ForwardControlTlvs::Blinded`].
+       Blinded(Vec<u8>),
+       /// See [`ForwardControlTlvs::Unblinded`] and [`super::blinded_route::ReceiveTlvs`].
+       Unblinded(ReceiveTlvs),
+}
+
+// Uses the provided secret to simultaneously encode and encrypt the unblinded control TLVs.
+impl Writeable for (Payload, [u8; 32]) {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               match &self.0 {
+                       Payload::Forward(ForwardControlTlvs::Blinded(encrypted_bytes)) |
+                       Payload::Receive { control_tlvs: ReceiveControlTlvs::Blinded(encrypted_bytes)} => {
+                               encode_varint_length_prefixed_tlv!(w, {
+                                       (4, encrypted_bytes, vec_type)
+                               })
+                       },
+                       Payload::Forward(ForwardControlTlvs::Unblinded(control_tlvs)) => {
+                               let write_adapter = ChaChaPolyWriteAdapter::new(self.1, &control_tlvs);
+                               encode_varint_length_prefixed_tlv!(w, {
+                                       (4, write_adapter, required)
+                               })
+                       },
+                       Payload::Receive { control_tlvs: ReceiveControlTlvs::Unblinded(control_tlvs)} => {
+                               let write_adapter = ChaChaPolyWriteAdapter::new(self.1, &control_tlvs);
+                               encode_varint_length_prefixed_tlv!(w, {
+                                       (4, write_adapter, required)
+                               })
+                       },
+               }
+               Ok(())
+       }
+}
+
+// Uses the provided secret to simultaneously decode and decrypt the control TLVs.
+impl ReadableArgs<SharedSecret> for Payload {
+       fn read<R: Read>(mut r: &mut R, encrypted_tlvs_ss: SharedSecret) -> Result<Self, DecodeError> {
+               use bitcoin::consensus::encode::{Decodable, Error, VarInt};
+               let v: VarInt = Decodable::consensus_decode(&mut r)
+                       .map_err(|e| match e {
+                               Error::Io(ioe) => DecodeError::from(ioe),
+                               _ => DecodeError::InvalidValue
+                       })?;
+
+               let mut rd = FixedLengthReader::new(r, v.0);
+               // TODO: support reply paths
+               let mut _reply_path_bytes: Option<Vec<u8>> = Some(Vec::new());
+               let mut read_adapter: Option<ChaChaPolyReadAdapter<ControlTlvs>> = None;
+               let rho = onion_utils::gen_rho_from_shared_secret(&encrypted_tlvs_ss.secret_bytes());
+               decode_tlv_stream!(&mut rd, {
+                       (2, _reply_path_bytes, vec_type),
+                       (4, read_adapter, (option: LengthReadableArgs, rho))
+               });
+               rd.eat_remaining().map_err(|_| DecodeError::ShortRead)?;
+
+               match read_adapter {
+                       None => return Err(DecodeError::InvalidValue),
+                       Some(ChaChaPolyReadAdapter { readable: ControlTlvs::Forward(tlvs)}) => {
+                               Ok(Payload::Forward(ForwardControlTlvs::Unblinded(tlvs)))
+                       },
+                       Some(ChaChaPolyReadAdapter { readable: ControlTlvs::Receive(tlvs)}) => {
+                               Ok(Payload::Receive { control_tlvs: ReceiveControlTlvs::Unblinded(tlvs)})
+                       },
+               }
+       }
+}
+
+/// When reading a packet off the wire, we don't know a priori whether the packet is to be forwarded
+/// or received. Thus we read a ControlTlvs rather than reading a ForwardControlTlvs or
+/// ReceiveControlTlvs directly.
+pub(super) enum ControlTlvs {
+       /// This onion message is intended to be forwarded.
+       Forward(ForwardTlvs),
+       /// This onion message is intended to be received.
+       Receive(ReceiveTlvs),
+}
+
+impl Readable for ControlTlvs {
+       fn read<R: Read>(mut r: &mut R) -> Result<Self, DecodeError> {
+               let mut _padding: Option<Padding> = None;
+               let mut _short_channel_id: Option<u64> = None;
+               let mut next_node_id: Option<PublicKey> = None;
+               let mut path_id: Option<[u8; 32]> = None;
+               let mut next_blinding_override: Option<PublicKey> = None;
+               decode_tlv_stream!(&mut r, {
+                       (1, _padding, option),
+                       (2, _short_channel_id, option),
+                       (4, next_node_id, option),
+                       (6, path_id, option),
+                       (8, next_blinding_override, option),
+               });
+
+               let valid_fwd_fmt  = next_node_id.is_some() && path_id.is_none();
+               let valid_recv_fmt = next_node_id.is_none() && next_blinding_override.is_none();
+
+               let payload_fmt = if valid_fwd_fmt {
+                       ControlTlvs::Forward(ForwardTlvs {
+                               next_node_id: next_node_id.unwrap(),
+                               next_blinding_override,
+                       })
+               } else if valid_recv_fmt {
+                       ControlTlvs::Receive(ReceiveTlvs {
+                               path_id,
+                       })
+               } else {
+                       return Err(DecodeError::InvalidValue)
+               };
+
+               Ok(payload_fmt)
+       }
+}
+
+/// Reads padding to the end, ignoring what's read.
+pub(crate) struct Padding {}
+impl Readable for Padding {
+       #[inline]
+       fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               loop {
+                       let mut buf = [0; 8192];
+                       if reader.read(&mut buf[..])? == 0 { break; }
+               }
+               Ok(Self {})
+       }
+}
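To make the "control TLVs" described above concrete: before the ChaCha20-Poly1305 step, a final hop's `ReceiveTlvs { path_id: Some(..) }` serializes to a single TLV record of type 6, the same type number the `ControlTlvs` reader expects. A hedged sketch of those pre-encryption bytes (the helper name is illustrative; the real encoding goes through `encode_tlv_stream!` and is then encrypted under the hop's `rho` key):

```rust
// Illustrative only: the unencrypted control-TLV stream for ReceiveTlvs with a path_id.
fn receive_tlvs_bytes(path_id: [u8; 32]) -> Vec<u8> {
    let mut out = Vec::with_capacity(2 + 32);
    out.push(6u8);  // TLV type 6: path_id, matching `(6, path_id, option)` above
    out.push(32u8); // BigSize length; values below 0xfd are a single byte
    out.extend_from_slice(&path_id);
    out
}
```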
diff --git a/lightning/src/onion_message/utils.rs b/lightning/src/onion_message/utils.rs
new file mode 100644 (file)
index 0000000..9b95183
--- /dev/null
@@ -0,0 +1,100 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Onion message utility methods live here.
+
+use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::hmac::{Hmac, HmacEngine};
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
+use bitcoin::secp256k1::ecdh::SharedSecret;
+
+use ln::onion_utils;
+use super::blinded_route::BlindedRoute;
+use super::messenger::Destination;
+
+use prelude::*;
+
+// TODO: DRY with onion_utils::construct_onion_keys_callback
+#[inline]
+pub(super) fn construct_keys_callback<T: secp256k1::Signing + secp256k1::Verification,
+       FType: FnMut(PublicKey, SharedSecret, PublicKey, [u8; 32], Option<PublicKey>, Option<Vec<u8>>)>(
+       secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Option<Destination>,
+       session_priv: &SecretKey, mut callback: FType
+) -> Result<(), secp256k1::Error> {
+       let mut msg_blinding_point_priv = session_priv.clone();
+       let mut msg_blinding_point = PublicKey::from_secret_key(secp_ctx, &msg_blinding_point_priv);
+       let mut onion_packet_pubkey_priv = msg_blinding_point_priv.clone();
+       let mut onion_packet_pubkey = msg_blinding_point.clone();
+
+       macro_rules! build_keys {
+               ($pk: expr, $blinded: expr, $encrypted_payload: expr) => {{
+                       let encrypted_data_ss = SharedSecret::new(&$pk, &msg_blinding_point_priv);
+
+                       let blinded_hop_pk = if $blinded { $pk } else {
+                               let hop_pk_blinding_factor = {
+                                       let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
+                                       hmac.input(encrypted_data_ss.as_ref());
+                                       Hmac::from_engine(hmac).into_inner()
+                               };
+                               let mut unblinded_pk = $pk;
+                               unblinded_pk.mul_assign(secp_ctx, &hop_pk_blinding_factor)?;
+                               unblinded_pk
+                       };
+                       let onion_packet_ss = SharedSecret::new(&blinded_hop_pk, &onion_packet_pubkey_priv);
+
+                       let rho = onion_utils::gen_rho_from_shared_secret(encrypted_data_ss.as_ref());
+                       let unblinded_pk_opt = if $blinded { None } else { Some($pk) };
+                       callback(blinded_hop_pk, onion_packet_ss, onion_packet_pubkey, rho, unblinded_pk_opt, $encrypted_payload);
+                       (encrypted_data_ss, onion_packet_ss)
+               }}
+       }
+
+       macro_rules! build_keys_in_loop {
+               ($pk: expr, $blinded: expr, $encrypted_payload: expr) => {
+                       let (encrypted_data_ss, onion_packet_ss) = build_keys!($pk, $blinded, $encrypted_payload);
+
+                       let msg_blinding_point_blinding_factor = {
+                               let mut sha = Sha256::engine();
+                               sha.input(&msg_blinding_point.serialize()[..]);
+                               sha.input(encrypted_data_ss.as_ref());
+                               Sha256::from_engine(sha).into_inner()
+                       };
+
+                       msg_blinding_point_priv.mul_assign(&msg_blinding_point_blinding_factor)?;
+                       msg_blinding_point = PublicKey::from_secret_key(secp_ctx, &msg_blinding_point_priv);
+
+                       let onion_packet_pubkey_blinding_factor = {
+                               let mut sha = Sha256::engine();
+                               sha.input(&onion_packet_pubkey.serialize()[..]);
+                               sha.input(onion_packet_ss.as_ref());
+                               Sha256::from_engine(sha).into_inner()
+                       };
+                       onion_packet_pubkey_priv.mul_assign(&onion_packet_pubkey_blinding_factor)?;
+                       onion_packet_pubkey = PublicKey::from_secret_key(secp_ctx, &onion_packet_pubkey_priv);
+               };
+       }
+
+       for pk in unblinded_path {
+               build_keys_in_loop!(*pk, false, None);
+       }
+       if let Some(dest) = destination {
+               match dest {
+                       Destination::Node(pk) => {
+                               build_keys!(pk, false, None);
+                       },
+                       Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => {
+                               for hop in blinded_hops {
+                                       build_keys_in_loop!(hop.blinded_node_id, true, Some(hop.encrypted_payload));
+                               }
+                       },
+               }
+       }
+       Ok(())
+}
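For readers mapping `construct_keys_callback` onto the route-blinding scheme it implements, here is a single-hop sketch of the derivations performed in `build_keys!` above: the ECDH secret with the hop, the blinded node id, and the `rho` key later used to encrypt that hop's payload. The standalone function and its name are illustrative; the real code threads these values through the macro and also maintains a parallel chain for the onion packet pubkey.

```rust
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};

// Given the current blinding secret `e` and a hop's unblinded node id, compute that hop's
// blinded node id and the rho key for its encrypted payload.
fn blind_one_hop(e: &SecretKey, node_id: PublicKey) -> (PublicKey, [u8; 32]) {
    let secp_ctx = Secp256k1::new();
    let ss = SharedSecret::new(&node_id, e); // `encrypted_data_ss` above

    // blinded_node_id = node_id * HMAC-SHA256("blinded_node_id", ss)
    let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
    hmac.input(ss.as_ref());
    let blinding_factor = Hmac::from_engine(hmac).into_inner();
    let mut blinded_node_id = node_id;
    blinded_node_id.mul_assign(&secp_ctx, &blinding_factor).expect("valid tweak");

    // rho = HMAC-SHA256("rho", ss), as computed by onion_utils::gen_rho_from_shared_secret.
    let mut rho_hmac = HmacEngine::<Sha256>::new(b"rho");
    rho_hmac.input(ss.as_ref());
    let rho = Hmac::from_engine(rho_hmac).into_inner();

    (blinded_node_id, rho)
}
```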
index 10ce74b971e6eebd13054257826aa78cbec94276..37bf1ea5a6f9757d1da5a29f636aa0a853f2de8e 100644 (file)
@@ -25,15 +25,16 @@ use chain;
 use chain::Access;
 use ln::features::{ChannelFeatures, NodeFeatures};
 use ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
-use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField, GossipTimestampFilter};
+use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
 use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
 use ln::msgs;
-use util::ser::{Readable, ReadableArgs, Writeable, Writer};
+use util::ser::{Readable, ReadableArgs, Writeable, Writer, MaybeReadable};
 use util::logger::{Logger, Level};
 use util::events::{Event, EventHandler, MessageSendEvent, MessageSendEventsProvider};
 use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
 
 use io;
+use io_extras::{copy, sink};
 use prelude::*;
 use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry};
 use core::{cmp, fmt};
@@ -611,7 +612,7 @@ pub struct ChannelUpdateInfo {
        /// The minimum value, which must be relayed to the next hop via the channel
        pub htlc_minimum_msat: u64,
        /// The maximum value which may be relayed to the next hop via the channel.
-       pub htlc_maximum_msat: Option<u64>,
+       pub htlc_maximum_msat: u64,
        /// Fees charged when the channel is used for routing
        pub fees: RoutingFees,
        /// Most recent update for the channel received from the network
@@ -628,15 +629,58 @@ impl fmt::Display for ChannelUpdateInfo {
        }
 }
 
-impl_writeable_tlv_based!(ChannelUpdateInfo, {
-       (0, last_update, required),
-       (2, enabled, required),
-       (4, cltv_expiry_delta, required),
-       (6, htlc_minimum_msat, required),
-       (8, htlc_maximum_msat, required),
-       (10, fees, required),
-       (12, last_update_message, required),
-});
+impl Writeable for ChannelUpdateInfo {
+       fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               write_tlv_fields!(writer, {
+                       (0, self.last_update, required),
+                       (2, self.enabled, required),
+                       (4, self.cltv_expiry_delta, required),
+                       (6, self.htlc_minimum_msat, required),
+                       // Writing htlc_maximum_msat as an Option<u64> is required to maintain backwards
+                       // compatibility with LDK versions prior to v0.0.110.
+                       (8, Some(self.htlc_maximum_msat), required),
+                       (10, self.fees, required),
+                       (12, self.last_update_message, required),
+               });
+               Ok(())
+       }
+}
+
+impl Readable for ChannelUpdateInfo {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               init_tlv_field_var!(last_update, required);
+               init_tlv_field_var!(enabled, required);
+               init_tlv_field_var!(cltv_expiry_delta, required);
+               init_tlv_field_var!(htlc_minimum_msat, required);
+               init_tlv_field_var!(htlc_maximum_msat, option);
+               init_tlv_field_var!(fees, required);
+               init_tlv_field_var!(last_update_message, required);
+
+               read_tlv_fields!(reader, {
+                       (0, last_update, required),
+                       (2, enabled, required),
+                       (4, cltv_expiry_delta, required),
+                       (6, htlc_minimum_msat, required),
+                       (8, htlc_maximum_msat, required),
+                       (10, fees, required),
+                       (12, last_update_message, required)
+               });
+
+               if let Some(htlc_maximum_msat) = htlc_maximum_msat {
+                       Ok(ChannelUpdateInfo {
+                               last_update: init_tlv_based_struct_field!(last_update, required),
+                               enabled: init_tlv_based_struct_field!(enabled, required),
+                               cltv_expiry_delta: init_tlv_based_struct_field!(cltv_expiry_delta, required),
+                               htlc_minimum_msat: init_tlv_based_struct_field!(htlc_minimum_msat, required),
+                               htlc_maximum_msat,
+                               fees: init_tlv_based_struct_field!(fees, required),
+                               last_update_message: init_tlv_based_struct_field!(last_update_message, required),
+                       })
+               } else {
+                       Err(DecodeError::InvalidValue)
+               }
+       }
+}
 
 #[derive(Clone, Debug, PartialEq)]
 /// Details about a channel (both directions).
@@ -715,16 +759,73 @@ impl fmt::Display for ChannelInfo {
        }
 }
 
-impl_writeable_tlv_based!(ChannelInfo, {
-       (0, features, required),
-       (1, announcement_received_time, (default_value, 0)),
-       (2, node_one, required),
-       (4, one_to_two, required),
-       (6, node_two, required),
-       (8, two_to_one, required),
-       (10, capacity_sats, required),
-       (12, announcement_message, required),
-});
+impl Writeable for ChannelInfo {
+       fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               write_tlv_fields!(writer, {
+                       (0, self.features, required),
+                       (1, self.announcement_received_time, (default_value, 0)),
+                       (2, self.node_one, required),
+                       (4, self.one_to_two, required),
+                       (6, self.node_two, required),
+                       (8, self.two_to_one, required),
+                       (10, self.capacity_sats, required),
+                       (12, self.announcement_message, required),
+               });
+               Ok(())
+       }
+}
+
+// A wrapper allowing for the optional deserialization of ChannelUpdateInfo. Utilizing this is
+// necessary to maintain backwards compatibility with previous serializations of `ChannelUpdateInfo`
+// that may have no `htlc_maximum_msat` field set. In case the field is absent, we simply ignore
+// the error and continue reading the `ChannelInfo`. Hopefully, we'll then eventually receive newer
+// channel updates via the gossip network.
+struct ChannelUpdateInfoDeserWrapper(Option<ChannelUpdateInfo>);
+
+impl MaybeReadable for ChannelUpdateInfoDeserWrapper {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
+               match ::util::ser::Readable::read(reader) {
+                       Ok(channel_update_option) => Ok(Some(Self(channel_update_option))),
+                       Err(DecodeError::ShortRead) => Ok(None),
+                       Err(DecodeError::InvalidValue) => Ok(None),
+                       Err(err) => Err(err),
+               }
+       }
+}
+
+impl Readable for ChannelInfo {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               init_tlv_field_var!(features, required);
+               init_tlv_field_var!(announcement_received_time, (default_value, 0));
+               init_tlv_field_var!(node_one, required);
+               let mut one_to_two_wrap: Option<ChannelUpdateInfoDeserWrapper> = None;
+               init_tlv_field_var!(node_two, required);
+               let mut two_to_one_wrap: Option<ChannelUpdateInfoDeserWrapper> = None;
+               init_tlv_field_var!(capacity_sats, required);
+               init_tlv_field_var!(announcement_message, required);
+               read_tlv_fields!(reader, {
+                       (0, features, required),
+                       (1, announcement_received_time, (default_value, 0)),
+                       (2, node_one, required),
+                       (4, one_to_two_wrap, ignorable),
+                       (6, node_two, required),
+                       (8, two_to_one_wrap, ignorable),
+                       (10, capacity_sats, required),
+                       (12, announcement_message, required),
+               });
+
+               Ok(ChannelInfo {
+                       features: init_tlv_based_struct_field!(features, required),
+                       node_one: init_tlv_based_struct_field!(node_one, required),
+                       one_to_two: one_to_two_wrap.map(|w| w.0).unwrap_or(None),
+                       node_two: init_tlv_based_struct_field!(node_two, required),
+                       two_to_one: two_to_one_wrap.map(|w| w.0).unwrap_or(None),
+                       capacity_sats: init_tlv_based_struct_field!(capacity_sats, required),
+                       announcement_message: init_tlv_based_struct_field!(announcement_message, required),
+                       announcement_received_time: init_tlv_based_struct_field!(announcement_received_time, (default_value, 0)),
+               })
+       }
+}
 
 /// A wrapper around [`ChannelInfo`] representing information about the channel as directed from a
 /// source node to a target node.
@@ -739,7 +840,7 @@ pub struct DirectedChannelInfo<'a> {
 impl<'a> DirectedChannelInfo<'a> {
        #[inline]
        fn new(channel: &'a ChannelInfo, direction: Option<&'a ChannelUpdateInfo>) -> Self {
-               let htlc_maximum_msat = direction.and_then(|direction| direction.htlc_maximum_msat);
+               let htlc_maximum_msat = direction.map(|direction| direction.htlc_maximum_msat);
                let capacity_msat = channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
 
                let (htlc_maximum_msat, effective_capacity) = match (htlc_maximum_msat, capacity_msat) {
@@ -989,11 +1090,54 @@ impl fmt::Display for NodeInfo {
        }
 }
 
-impl_writeable_tlv_based!(NodeInfo, {
-       (0, lowest_inbound_channel_fees, option),
-       (2, announcement_info, option),
-       (4, channels, vec_type),
-});
+impl Writeable for NodeInfo {
+       fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               write_tlv_fields!(writer, {
+                       (0, self.lowest_inbound_channel_fees, option),
+                       (2, self.announcement_info, option),
+                       (4, self.channels, vec_type),
+               });
+               Ok(())
+       }
+}
+
+// A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is
+// necessary to maintain compatibility with previous serializations of `NetAddress` that have an
+// invalid hostname set. We ignore and eat all errors until we are either able to read a
+// `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end.
+struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo);
+
+impl MaybeReadable for NodeAnnouncementInfoDeserWrapper {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
+               match ::util::ser::Readable::read(reader) {
+                       Ok(node_announcement_info) => return Ok(Some(Self(node_announcement_info))),
+                       Err(_) => {
+                               copy(reader, &mut sink()).unwrap();
+                               return Ok(None)
+                       },
+               };
+       }
+}
+
+impl Readable for NodeInfo {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               init_tlv_field_var!(lowest_inbound_channel_fees, option);
+               let mut announcement_info_wrap: Option<NodeAnnouncementInfoDeserWrapper> = None;
+               init_tlv_field_var!(channels, vec_type);
+
+               read_tlv_fields!(reader, {
+                       (0, lowest_inbound_channel_fees, option),
+                       (2, announcement_info_wrap, ignorable),
+                       (4, channels, vec_type),
+               });
+
+               Ok(NodeInfo {
+                       lowest_inbound_channel_fees: init_tlv_based_struct_field!(lowest_inbound_channel_fees, option),
+                       announcement_info: announcement_info_wrap.map(|w| w.0),
+                       channels: init_tlv_based_struct_field!(channels, vec_type),
+               })
+       }
+}
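`NodeAnnouncementInfoDeserWrapper::read` above swallows the decode error and drains the rest of the record with `copy(reader, &mut sink())`, so the enclosing `read_tlv_fields!` stream stays aligned for the fields that follow. A generic sketch of that drain-and-continue pattern, using the `std::io` stand-ins for the `io_extras` helpers imported above; the helper name is illustrative, not LDK API.

use std::io::{self, copy, sink, Read};

/// Try to decode a value from a length-delimited record; on failure, consume the
/// remaining bytes so the caller can keep reading whatever follows the record.
fn read_or_skip<T, R: Read>(
	reader: &mut R,
	decode: impl FnOnce(&mut R) -> io::Result<T>,
) -> io::Result<Option<T>> {
	match decode(&mut *reader) {
		Ok(value) => Ok(Some(value)),
		Err(_) => {
			// Drain whatever the failed decoder left behind; the record boundary is
			// enforced by the caller handing us a reader limited to this record.
			copy(reader, &mut sink())?;
			Ok(None)
		}
	}
}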
 
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
@@ -1495,17 +1639,19 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
                match channels.get_mut(&msg.short_channel_id) {
                        None => return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}),
                        Some(channel) => {
-                               if let OptionalField::Present(htlc_maximum_msat) = msg.htlc_maximum_msat {
-                                       if htlc_maximum_msat > MAX_VALUE_MSAT {
-                                               return Err(LightningError{err: "htlc_maximum_msat is larger than maximum possible msats".to_owned(), action: ErrorAction::IgnoreError});
-                                       }
+                               if msg.htlc_maximum_msat > MAX_VALUE_MSAT {
+                                       return Err(LightningError{err:
+                                               "htlc_maximum_msat is larger than maximum possible msats".to_owned(),
+                                               action: ErrorAction::IgnoreError});
+                               }
 
-                                       if let Some(capacity_sats) = channel.capacity_sats {
-                                               // It's possible channel capacity is available now, although it wasn't available at announcement (so the field is None).
-                                               // Don't query UTXO set here to reduce DoS risks.
-                                               if capacity_sats > MAX_VALUE_MSAT / 1000 || htlc_maximum_msat > capacity_sats * 1000 {
-                                                       return Err(LightningError{err: "htlc_maximum_msat is larger than channel capacity or capacity is bogus".to_owned(), action: ErrorAction::IgnoreError});
-                                               }
+                               if let Some(capacity_sats) = channel.capacity_sats {
+                                       // It's possible channel capacity is available now, although it wasn't available at announcement (so the field is None).
+                                       // Don't query UTXO set here to reduce DoS risks.
+                                       if capacity_sats > MAX_VALUE_MSAT / 1000 || msg.htlc_maximum_msat > capacity_sats * 1000 {
+                                               return Err(LightningError{err:
+                                                       "htlc_maximum_msat is larger than channel capacity or capacity is bogus".to_owned(),
+                                                       action: ErrorAction::IgnoreError});
                                        }
                                }
                                macro_rules! check_update_latest {
@@ -1539,7 +1685,7 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
                                                        last_update: msg.timestamp,
                                                        cltv_expiry_delta: msg.cltv_expiry_delta,
                                                        htlc_minimum_msat: msg.htlc_minimum_msat,
-                                                       htlc_maximum_msat: if let OptionalField::Present(max_value) = msg.htlc_maximum_msat { Some(max_value) } else { None },
+                                                       htlc_maximum_msat: msg.htlc_maximum_msat,
                                                        fees: RoutingFees {
                                                                base_msat: msg.fee_base_msat,
                                                                proportional_millionths: msg.fee_proportional_millionths,
@@ -1680,8 +1826,8 @@ mod tests {
        use chain;
        use ln::PaymentHash;
        use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
-       use routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY};
-       use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
+       use routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo};
+       use ln::msgs::{Init, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
                UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate,
                ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
        use util::test_utils;
@@ -1805,7 +1951,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 144,
                        htlc_minimum_msat: 1_000_000,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: 1_000_000,
                        fee_base_msat: 10_000,
                        fee_proportional_millionths: 20,
                        excess_data: Vec::new()
@@ -2026,7 +2172,7 @@ mod tests {
                let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
                match gossip_sync.handle_channel_update(&valid_channel_update) {
                        Ok(res) => assert!(res),
-                       _ => panic!()
+                       _ => panic!(),
                };
 
                {
@@ -2059,7 +2205,7 @@ mod tests {
                };
 
                let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| {
-                       unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(MAX_VALUE_MSAT + 1);
+                       unsigned_channel_update.htlc_maximum_msat = MAX_VALUE_MSAT + 1;
                        unsigned_channel_update.timestamp += 110;
                }, node_1_privkey, &secp_ctx);
                match gossip_sync.handle_channel_update(&valid_channel_update) {
@@ -2068,7 +2214,7 @@ mod tests {
                };
 
                let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| {
-                       unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(amount_sats * 1000 + 1);
+                       unsigned_channel_update.htlc_maximum_msat = amount_sats * 1000 + 1;
                        unsigned_channel_update.timestamp += 110;
                }, node_1_privkey, &secp_ctx);
                match gossip_sync.handle_channel_update(&valid_channel_update) {
@@ -2806,6 +2952,142 @@ mod tests {
                assert_eq!(format_bytes_alias(b"\xFFI <heart>\0LDK!"), "\u{FFFD}I <heart>");
                assert_eq!(format_bytes_alias(b"\xFFI <heart>\tLDK!"), "\u{FFFD}I <heart>\u{FFFD}LDK!");
        }
+
+       #[test]
+       fn channel_info_is_readable() {
+               let chanmon_cfgs = ::ln::functional_test_utils::create_chanmon_cfgs(2);
+               let node_cfgs = ::ln::functional_test_utils::create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = ::ln::functional_test_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None, None, None]);
+               let nodes = ::ln::functional_test_utils::create_network(2, &node_cfgs, &node_chanmgrs);
+
+               // 1. Test encoding/decoding of ChannelUpdateInfo
+               let chan_update_info = ChannelUpdateInfo {
+                       last_update: 23,
+                       enabled: true,
+                       cltv_expiry_delta: 42,
+                       htlc_minimum_msat: 1234,
+                       htlc_maximum_msat: 5678,
+                       fees: RoutingFees { base_msat: 9, proportional_millionths: 10 },
+                       last_update_message: None,
+               };
+
+               let mut encoded_chan_update_info: Vec<u8> = Vec::new();
+               assert!(chan_update_info.write(&mut encoded_chan_update_info).is_ok());
+
+               // First make sure we can read ChannelUpdateInfos we just wrote
+               let read_chan_update_info: ChannelUpdateInfo = ::util::ser::Readable::read(&mut encoded_chan_update_info.as_slice()).unwrap();
+               assert_eq!(chan_update_info, read_chan_update_info);
+
+               // Check the serialization hasn't changed.
+               let legacy_chan_update_info_with_some: Vec<u8> = hex::decode("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap();
+               assert_eq!(encoded_chan_update_info, legacy_chan_update_info_with_some);
+
+               // Check we fail if htlc_maximum_msat is not present in either the ChannelUpdateInfo itself
+               // or the ChannelUpdate enclosed with `last_update_message`.
+               let legacy_chan_update_info_with_some_and_fail_update: Vec<u8> = hex::decode("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap();
+               let read_chan_update_info_res: Result<ChannelUpdateInfo, ::ln::msgs::DecodeError> = ::util::ser::Readable::read(&mut legacy_chan_update_info_with_some_and_fail_update.as_slice());
+               assert!(read_chan_update_info_res.is_err());
+
+               let legacy_chan_update_info_with_none: Vec<u8> = hex::decode("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap();
+               let read_chan_update_info_res: Result<ChannelUpdateInfo, ::ln::msgs::DecodeError> = ::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice());
+               assert!(read_chan_update_info_res.is_err());
+
+               // 2. Test encoding/decoding of ChannelInfo
+               // Check we can encode/decode ChannelInfo without ChannelUpdateInfo fields present.
+               let chan_info_none_updates = ChannelInfo {
+                       features: ChannelFeatures::known(),
+                       node_one: NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
+                       one_to_two: None,
+                       node_two: NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
+                       two_to_one: None,
+                       capacity_sats: None,
+                       announcement_message: None,
+                       announcement_received_time: 87654,
+               };
+
+               let mut encoded_chan_info: Vec<u8> = Vec::new();
+               assert!(chan_info_none_updates.write(&mut encoded_chan_info).is_ok());
+
+               let read_chan_info: ChannelInfo = ::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
+               assert_eq!(chan_info_none_updates, read_chan_info);
+
+               // Check we can encode/decode ChannelInfo with ChannelUpdateInfo fields present.
+               let chan_info_some_updates = ChannelInfo {
+                       features: ChannelFeatures::known(),
+                       node_one: NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
+                       one_to_two: Some(chan_update_info.clone()),
+                       node_two: NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
+                       two_to_one: Some(chan_update_info.clone()),
+                       capacity_sats: None,
+                       announcement_message: None,
+                       announcement_received_time: 87654,
+               };
+
+               let mut encoded_chan_info: Vec<u8> = Vec::new();
+               assert!(chan_info_some_updates.write(&mut encoded_chan_info).is_ok());
+
+               let read_chan_info: ChannelInfo = ::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
+               assert_eq!(chan_info_some_updates, read_chan_info);
+
+               // Check the serialization hasn't changed.
+               let legacy_chan_info_with_some: Vec<u8> = hex::decode("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
+               assert_eq!(encoded_chan_info, legacy_chan_info_with_some);
+
+               // Check we can decode legacy ChannelInfo, even if the `two_to_one` / `one_to_two` /
+               // `last_update_message` fields fail to decode due to missing htlc_maximum_msat.
+               let legacy_chan_info_with_some_and_fail_update = hex::decode("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap();
+               let read_chan_info: ChannelInfo = ::util::ser::Readable::read(&mut legacy_chan_info_with_some_and_fail_update.as_slice()).unwrap();
+               assert_eq!(read_chan_info.announcement_received_time, 87654);
+               assert_eq!(read_chan_info.one_to_two, None);
+               assert_eq!(read_chan_info.two_to_one, None);
+
+               let legacy_chan_info_with_none: Vec<u8> = hex::decode("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
+               let read_chan_info: ChannelInfo = ::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap();
+               assert_eq!(read_chan_info.announcement_received_time, 87654);
+               assert_eq!(read_chan_info.one_to_two, None);
+               assert_eq!(read_chan_info.two_to_one, None);
+       }
+
+       #[test]
+       fn node_info_is_readable() {
+               use std::convert::TryFrom;
+
+               // 1. Check we can read a valid NodeAnnouncementInfo and fail on an invalid one
+               let valid_netaddr = ::ln::msgs::NetAddress::Hostname { hostname: ::util::ser::Hostname::try_from("A".to_string()).unwrap(), port: 1234 };
+               let valid_node_ann_info = NodeAnnouncementInfo {
+                       features: NodeFeatures::known(),
+                       last_update: 0,
+                       rgb: [0u8; 3],
+                       alias: NodeAlias([0u8; 32]),
+                       addresses: vec![valid_netaddr],
+                       announcement_message: None,
+               };
+
+               let mut encoded_valid_node_ann_info = Vec::new();
+               assert!(valid_node_ann_info.write(&mut encoded_valid_node_ann_info).is_ok());
+               let read_valid_node_ann_info: NodeAnnouncementInfo = ::util::ser::Readable::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
+               assert_eq!(read_valid_node_ann_info, valid_node_ann_info);
+
+               let encoded_invalid_node_ann_info = hex::decode("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap();
+               let read_invalid_node_ann_info_res: Result<NodeAnnouncementInfo, ::ln::msgs::DecodeError> = ::util::ser::Readable::read(&mut encoded_invalid_node_ann_info.as_slice());
+               assert!(read_invalid_node_ann_info_res.is_err());
+
+               // 2. Check we can read a NodeInfo anyway, but set the NodeAnnouncementInfo to None if invalid
+               let valid_node_info = NodeInfo {
+                       channels: Vec::new(),
+                       lowest_inbound_channel_fees: None,
+                       announcement_info: Some(valid_node_ann_info),
+               };
+
+               let mut encoded_valid_node_info = Vec::new();
+               assert!(valid_node_info.write(&mut encoded_valid_node_info).is_ok());
+               let read_valid_node_info: NodeInfo = ::util::ser::Readable::read(&mut encoded_valid_node_info.as_slice()).unwrap();
+               assert_eq!(read_valid_node_info, valid_node_info);
+
+               let encoded_invalid_node_info_hex = hex::decode("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap();
+               let read_invalid_node_info: NodeInfo = ::util::ser::Readable::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap();
+               assert_eq!(read_invalid_node_info.announcement_info, None);
+       }
 }
 
 #[cfg(all(test, feature = "_bench_unstable"))]
index a97e1b6ffa45db704b89505cfce7d6c58cc1f16b..4c00dc5fcf9327126d0f9bd580823fd1447ea4f7 100644 (file)
@@ -238,7 +238,7 @@ pub struct PaymentParameters {
        /// A value of 0 will allow payments up to and including a channel's total announced usable
        /// capacity, a value of one will only use up to half its capacity, two 1/4, etc.
        ///
-       /// Default value: 1
+       /// Default value: 2
        pub max_channel_saturation_power_of_half: u8,
 
        /// A list of SCIDs which this payment was previously attempted over and which caused the
@@ -253,7 +253,7 @@ impl_writeable_tlv_based!(PaymentParameters, {
        (2, features, option),
        (3, max_path_count, (default_value, DEFAULT_MAX_PATH_COUNT)),
        (4, route_hints, vec_type),
-       (5, max_channel_saturation_power_of_half, (default_value, 1)),
+       (5, max_channel_saturation_power_of_half, (default_value, 2)),
        (6, expiry_time, option),
        (7, previously_failed_channels, vec_type),
 });
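The field documented above caps each channel's contribution at its capacity divided by 2^max_channel_saturation_power_of_half, and the default moves from 1 (half the channel) to 2 (a quarter). A worked example of that arithmetic; the helper is illustrative, not LDK's internal code.

/// Per-channel contribution cap: capacity / 2^power.
fn saturation_limit_msat(channel_capacity_msat: u64, power_of_half: u8) -> u64 {
	channel_capacity_msat >> power_of_half
}

fn main() {
	// With the new default of 2, a 1_000_000 msat channel contributes at most a quarter
	// of its capacity to one payment; 1 would allow half, 0 the full capacity.
	assert_eq!(saturation_limit_msat(1_000_000, 2), 250_000);
	assert_eq!(saturation_limit_msat(1_000_000, 1), 500_000);
	assert_eq!(saturation_limit_msat(1_000_000, 0), 1_000_000);
}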
@@ -268,7 +268,7 @@ impl PaymentParameters {
                        expiry_time: None,
                        max_total_cltv_expiry_delta: DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA,
                        max_path_count: DEFAULT_MAX_PATH_COUNT,
-                       max_channel_saturation_power_of_half: 1,
+                       max_channel_saturation_power_of_half: 2,
                        previously_failed_channels: Vec::new(),
                }
        }
@@ -754,7 +754,7 @@ where L::Target: Logger, GL::Target: Logger {
 pub(crate) fn get_route<L: Deref, S: Score>(
        our_node_pubkey: &PublicKey, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph,
        first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, final_cltv_expiry_delta: u32,
-       logger: L, scorer: &S, random_seed_bytes: &[u8; 32]
+       logger: L, scorer: &S, _random_seed_bytes: &[u8; 32]
 ) -> Result<Route, LightningError>
 where L::Target: Logger {
        let payee_node_id = NodeId::from_pubkey(&payment_params.payee_pubkey);
@@ -796,11 +796,11 @@ where L::Target: Logger {
        // 4. See if we managed to collect paths which aggregately are able to transfer target value
        //    (not recommended value).
        // 5. If yes, proceed. If not, fail routing.
-       // 6. Randomly combine paths into routes having enough to fulfill the payment. (TODO: knapsack)
-       // 7. Of all the found paths, select only those with the lowest total fee.
-       // 8. The last path in every selected route is likely to be more than we need.
-       //    Reduce its value-to-transfer and recompute fees.
-       // 9. Choose the best route by the lowest total fee.
+       // 6. Select the paths which have the lowest cost (fee plus scorer penalty) per amount
+       //    transferred up to the transfer target value.
+       // 7. Reduce the value of the last path until we are sending only the target value.
+       // 8. If our maximum channel saturation limit caused us to pick two identical paths, combine
+       //    them so that we're not sending two HTLCs along the same path.
 
        // As for the actual search algorithm,
        // we do a payee-to-payer pseudo-Dijkstra's sorting by each node's distance from the payee
@@ -1476,7 +1476,7 @@ where L::Target: Logger {
                // Both these cases (and other cases except reaching recommended_value_msat) mean that
                // paths_collection will be stopped because found_new_path==false.
                // This is not necessarily a routing failure.
-               'path_construction: while let Some(RouteGraphNode { node_id, lowest_fee_to_node, total_cltv_delta, value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat, path_length_to_node, .. }) = targets.pop() {
+               'path_construction: while let Some(RouteGraphNode { node_id, lowest_fee_to_node, total_cltv_delta, mut value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat, path_length_to_node, .. }) = targets.pop() {
 
                        // Since we're going payee-to-payer, hitting our node as a target means we should stop
                        // traversing the graph and arrange the path out of what we found.
@@ -1542,7 +1542,9 @@ where L::Target: Logger {
                                // on some channels we already passed (assuming dest->source direction). Here, we
                                // recompute the fees again, so that if that's the case, we match the currently
                                // underpaid htlc_minimum_msat with fees.
-                               payment_path.update_value_and_recompute_fees(cmp::min(value_contribution_msat, final_value_msat));
+                               debug_assert_eq!(payment_path.get_value_msat(), value_contribution_msat);
+                               value_contribution_msat = cmp::min(value_contribution_msat, final_value_msat);
+                               payment_path.update_value_and_recompute_fees(value_contribution_msat);
 
                                // Since a path allows to transfer as much value as
                                // the smallest channel it has ("bottleneck"), we should recompute
@@ -1653,96 +1655,55 @@ where L::Target: Logger {
                return Err(LightningError{err: "Failed to find a sufficient route to the given destination".to_owned(), action: ErrorAction::IgnoreError});
        }
 
-       // Sort by total fees and take the best paths.
-       payment_paths.sort_unstable_by_key(|path| path.get_total_fee_paid_msat());
-       if payment_paths.len() > 50 {
-               payment_paths.truncate(50);
-       }
+       // Step (6).
+       let mut selected_route = payment_paths;
 
-       // Draw multiple sufficient routes by randomly combining the selected paths.
-       let mut drawn_routes = Vec::new();
-       let mut prng = ChaCha20::new(random_seed_bytes, &[0u8; 12]);
-       let mut random_index_bytes = [0u8; ::core::mem::size_of::<usize>()];
+       debug_assert_eq!(selected_route.iter().map(|p| p.get_value_msat()).sum::<u64>(), already_collected_value_msat);
+       let mut overpaid_value_msat = already_collected_value_msat - final_value_msat;
 
-       let num_permutations = payment_paths.len();
-       for _ in 0..num_permutations {
-               let mut cur_route = Vec::<PaymentPath>::new();
-               let mut aggregate_route_value_msat = 0;
+       // First, sort by the cost-per-value of the path, dropping the paths that cost the most for
+       // the value they contribute towards the payment amount.
+       // We sort in descending order as we will remove from the front in `retain`, next.
+       selected_route.sort_unstable_by(|a, b|
+               (((b.get_cost_msat() as u128) << 64) / (b.get_value_msat() as u128))
+                       .cmp(&(((a.get_cost_msat() as u128) << 64) / (a.get_value_msat() as u128)))
+       );
 
-               // Step (6).
-               // Do a Fisher-Yates shuffle to create a random permutation of the payment paths
-               for cur_index in (1..payment_paths.len()).rev() {
-                       prng.process_in_place(&mut random_index_bytes);
-                       let random_index = usize::from_be_bytes(random_index_bytes).wrapping_rem(cur_index+1);
-                       payment_paths.swap(cur_index, random_index);
+       // We should make sure that at least 1 path is left.
+       let mut paths_left = selected_route.len();
+       selected_route.retain(|path| {
+               if paths_left == 1 {
+                       return true
+               }
+               let path_value_msat = path.get_value_msat();
+               if path_value_msat <= overpaid_value_msat {
+                       overpaid_value_msat -= path_value_msat;
+                       paths_left -= 1;
+                       return false;
                }
+               true
+       });
+       debug_assert!(selected_route.len() > 0);
 
+       if overpaid_value_msat != 0 {
                // Step (7).
-               for payment_path in &payment_paths {
-                       cur_route.push(payment_path.clone());
-                       aggregate_route_value_msat += payment_path.get_value_msat();
-                       if aggregate_route_value_msat > final_value_msat {
-                               // Last path likely overpaid. Substract it from the most expensive
-                               // (in terms of proportional fee) path in this route and recompute fees.
-                               // This might be not the most economically efficient way, but fewer paths
-                               // also makes routing more reliable.
-                               let mut overpaid_value_msat = aggregate_route_value_msat - final_value_msat;
-
-                               // First, we drop some expensive low-value paths entirely if possible, since fewer
-                               // paths is better: the payment is less likely to fail. In order to do so, we sort
-                               // by value and fall back to total fees paid, i.e., in case of equal values we
-                               // prefer lower cost paths.
-                               cur_route.sort_unstable_by(|a, b| {
-                                       a.get_value_msat().cmp(&b.get_value_msat())
-                                               // Reverse ordering for cost, so we drop higher-cost paths first
-                                               .then_with(|| b.get_cost_msat().cmp(&a.get_cost_msat()))
-                               });
-
-                               // We should make sure that at least 1 path left.
-                               let mut paths_left = cur_route.len();
-                               cur_route.retain(|path| {
-                                       if paths_left == 1 {
-                                               return true
-                                       }
-                                       let mut keep = true;
-                                       let path_value_msat = path.get_value_msat();
-                                       if path_value_msat <= overpaid_value_msat {
-                                               keep = false;
-                                               overpaid_value_msat -= path_value_msat;
-                                               paths_left -= 1;
-                                       }
-                                       keep
-                               });
-
-                               if overpaid_value_msat == 0 {
-                                       break;
-                               }
+               // Now, subtract the remaining overpaid value from the most-expensive path.
+               // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
+               // so that the sender pays less fees overall. And also htlc_minimum_msat.
+               selected_route.sort_unstable_by(|a, b| {
+                       let a_f = a.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>();
+                       let b_f = b.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>();
+                       a_f.cmp(&b_f).then_with(|| b.get_cost_msat().cmp(&a.get_cost_msat()))
+               });
+               let expensive_payment_path = selected_route.first_mut().unwrap();
 
-                               assert!(cur_route.len() > 0);
-
-                               // Step (8).
-                               // Now, subtract the overpaid value from the most-expensive path.
-                               // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
-                               // so that the sender pays less fees overall. And also htlc_minimum_msat.
-                               cur_route.sort_unstable_by_key(|path| { path.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>() });
-                               let expensive_payment_path = cur_route.first_mut().unwrap();
-
-                               // We already dropped all the small value paths above, meaning all the
-                               // remaining paths are larger than remaining overpaid_value_msat.
-                               // Thus, this can't be negative.
-                               let expensive_path_new_value_msat = expensive_payment_path.get_value_msat() - overpaid_value_msat;
-                               expensive_payment_path.update_value_and_recompute_fees(expensive_path_new_value_msat);
-                               break;
-                       }
-               }
-               drawn_routes.push(cur_route);
+               // We already dropped all the paths with value below `overpaid_value_msat` above, thus this
+               // can't go negative.
+               let expensive_path_new_value_msat = expensive_payment_path.get_value_msat() - overpaid_value_msat;
+               expensive_payment_path.update_value_and_recompute_fees(expensive_path_new_value_msat);
        }
 
-       // Step (9).
-       // Select the best route by lowest total cost.
-       drawn_routes.sort_unstable_by_key(|paths| paths.iter().map(|path| path.get_cost_msat()).sum::<u64>());
-       let selected_route = drawn_routes.first_mut().unwrap();
-
+       // Step (8).
        // Sort by the path itself and combine redundant paths.
        // Note that we sort by SCIDs alone as its simpler but when combining we have to ensure we
        // compare both SCIDs and NodeIds as individual nodes may use random aliases causing collisions
@@ -1979,8 +1940,8 @@ mod tests {
        use chain::transaction::OutPoint;
        use chain::keysinterface::KeysInterface;
        use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
-       use ln::msgs::{ErrorAction, LightningError, OptionalField, UnsignedChannelAnnouncement, ChannelAnnouncement, RoutingMessageHandler,
-               NodeAnnouncement, UnsignedNodeAnnouncement, ChannelUpdate, UnsignedChannelUpdate};
+       use ln::msgs::{ErrorAction, LightningError, UnsignedChannelAnnouncement, ChannelAnnouncement, RoutingMessageHandler,
+               NodeAnnouncement, UnsignedNodeAnnouncement, ChannelUpdate, UnsignedChannelUpdate, MAX_VALUE_MSAT};
        use ln::channelmanager;
        use util::test_utils;
        use util::chacha20::ChaCha20;
@@ -2172,7 +2133,7 @@ mod tests {
                                flags: 0,
                                cltv_expiry_delta: 0,
                                htlc_minimum_msat: 0,
-                               htlc_maximum_msat: OptionalField::Absent,
+                               htlc_maximum_msat: MAX_VALUE_MSAT,
                                fee_base_msat: 0,
                                fee_proportional_millionths: 0,
                                excess_data: Vec::new()
@@ -2184,7 +2145,7 @@ mod tests {
                                flags: 1,
                                cltv_expiry_delta: 0,
                                htlc_minimum_msat: 0,
-                               htlc_maximum_msat: OptionalField::Absent,
+                               htlc_maximum_msat: MAX_VALUE_MSAT,
                                fee_base_msat: 0,
                                fee_proportional_millionths: 0,
                                excess_data: Vec::new()
@@ -2278,7 +2239,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2294,7 +2255,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (5 << 4) | 3,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: u32::max_value(),
                        fee_proportional_millionths: u32::max_value(),
                        excess_data: Vec::new()
@@ -2306,7 +2267,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2322,7 +2283,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (5 << 4) | 3,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: u32::max_value(),
                        fee_proportional_millionths: u32::max_value(),
                        excess_data: Vec::new()
@@ -2334,7 +2295,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2350,7 +2311,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (3 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2362,7 +2323,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: (3 << 4) | 2,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 100,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2376,7 +2337,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (4 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 1000000,
                        excess_data: Vec::new()
@@ -2388,7 +2349,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: (4 << 4) | 2,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2402,7 +2363,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (13 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 2000000,
                        excess_data: Vec::new()
@@ -2414,7 +2375,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: (13 << 4) | 2,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2430,7 +2391,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (6 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2442,7 +2403,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: (6 << 4) | 2,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new(),
@@ -2456,7 +2417,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (11 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2468,7 +2429,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: (11 << 4) | 2,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2486,7 +2447,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (7 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 1000000,
                        excess_data: Vec::new()
@@ -2498,7 +2459,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: (7 << 4) | 2,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2583,7 +2544,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2595,7 +2556,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2607,7 +2568,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2619,7 +2580,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2631,7 +2592,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2646,7 +2607,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 200_000_000,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2661,7 +2622,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(199_999_999),
+                       htlc_maximum_msat: 199_999_999,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2680,7 +2641,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2710,7 +2671,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 35_000,
-                       htlc_maximum_msat: OptionalField::Present(40_000),
+                       htlc_maximum_msat: 40_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2722,7 +2683,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 35_000,
-                       htlc_maximum_msat: OptionalField::Present(40_000),
+                       htlc_maximum_msat: 40_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2736,7 +2697,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2748,7 +2709,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2762,7 +2723,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2783,7 +2744,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 65_000,
-                       htlc_maximum_msat: OptionalField::Present(80_000),
+                       htlc_maximum_msat: 80_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2795,7 +2756,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2807,7 +2768,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 100_000,
                        excess_data: Vec::new()
@@ -2846,7 +2807,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -2858,7 +2819,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3264,7 +3225,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3276,7 +3237,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3336,7 +3297,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3348,7 +3309,7 @@ mod tests {
                        flags: 2, // to disable
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3681,7 +3642,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3693,7 +3654,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3708,7 +3669,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(1_000_000_000),
+                       htlc_maximum_msat: 1_000_000_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3723,7 +3684,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: 250_000_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3756,7 +3717,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(1_000_000_000),
+                       htlc_maximum_msat: 1_000_000_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3791,7 +3752,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(1_000_000_000),
+                       htlc_maximum_msat: 1_000_000_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3806,7 +3767,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(15_000),
+                       htlc_maximum_msat: 15_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3841,7 +3802,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3864,7 +3825,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (3 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: 15_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3876,7 +3837,7 @@ mod tests {
                        flags: 1,
                        cltv_expiry_delta: (3 << 4) | 2,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: 15_000,
                        fee_base_msat: 100,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3908,7 +3869,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(10_000),
+                       htlc_maximum_msat: 10_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3956,7 +3917,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3968,7 +3929,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3983,7 +3944,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -3995,7 +3956,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4008,7 +3969,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(50_000),
+                       htlc_maximum_msat: 50_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4020,7 +3981,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4077,7 +4038,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 1_000_000,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4089,7 +4050,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(50_000),
+                       htlc_maximum_msat: 50_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4134,7 +4095,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4146,7 +4107,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(50_000),
+                       htlc_maximum_msat: 50_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4161,7 +4122,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(60_000),
+                       htlc_maximum_msat: 60_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4173,7 +4134,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(60_000),
+                       htlc_maximum_msat: 60_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4188,7 +4149,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(200_000),
+                       htlc_maximum_msat: 200_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4200,7 +4161,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(180_000),
+                       htlc_maximum_msat: 180_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4291,7 +4252,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4303,7 +4264,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4317,7 +4278,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4329,7 +4290,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4344,7 +4305,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(200_000),
+                       htlc_maximum_msat: 200_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4360,7 +4321,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(200_000),
+                       htlc_maximum_msat: 200_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4372,7 +4333,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(200_000),
+                       htlc_maximum_msat: 200_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4385,7 +4346,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4397,7 +4358,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4459,7 +4420,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4471,7 +4432,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4485,7 +4446,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4497,7 +4458,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4512,7 +4473,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(200_000),
+                       htlc_maximum_msat: 200_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4528,7 +4489,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(200_000),
+                       htlc_maximum_msat: 200_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4540,7 +4501,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(200_000),
+                       htlc_maximum_msat: 200_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4553,7 +4514,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 1_000,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4565,7 +4526,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4626,7 +4587,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4639,7 +4600,7 @@ mod tests {
                        flags: 2,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4653,7 +4614,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4665,7 +4626,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4679,7 +4640,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4702,7 +4663,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(250_000),
+                       htlc_maximum_msat: 250_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4714,7 +4675,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4727,7 +4688,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 150_000,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4739,7 +4700,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4796,7 +4757,7 @@ mod tests {
                                cltv_expiry_delta: 42,
                                htlc_minimum_msat: None,
                                htlc_maximum_msat: None,
-                       }])]);
+                       }])]).with_max_channel_saturation_power_of_half(0);
 
                // Keep only two paths from us to nodes[2], both with a 99sat HTLC maximum, with one with
                // no fee and one with a 1msat fee. Previously, trying to route 100 sats to nodes[2] here
@@ -4810,7 +4771,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (5 << 4) | 5,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(99_000),
+                       htlc_maximum_msat: 99_000,
                        fee_base_msat: u32::max_value(),
                        fee_proportional_millionths: u32::max_value(),
                        excess_data: Vec::new()
@@ -4822,7 +4783,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (5 << 4) | 3,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(99_000),
+                       htlc_maximum_msat: 99_000,
                        fee_base_msat: u32::max_value(),
                        fee_proportional_millionths: u32::max_value(),
                        excess_data: Vec::new()
@@ -4834,7 +4795,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (4 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 1,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4846,7 +4807,7 @@ mod tests {
                        flags: 0|2, // Channel disabled
                        cltv_expiry_delta: (13 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 2000000,
                        excess_data: Vec::new()
@@ -4899,7 +4860,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(100_000),
+                       htlc_maximum_msat: 100_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4911,7 +4872,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(50_000),
+                       htlc_maximum_msat: 50_000,
                        fee_base_msat: 100,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4925,7 +4886,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(60_000),
+                       htlc_maximum_msat: 60_000,
                        fee_base_msat: 100,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4937,7 +4898,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(60_000),
+                       htlc_maximum_msat: 60_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4951,7 +4912,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(20_000),
+                       htlc_maximum_msat: 20_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -4963,7 +4924,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(20_000),
+                       htlc_maximum_msat: 20_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5050,7 +5011,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (6 << 4) | 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5065,7 +5026,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (5 << 4) | 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 100,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5080,7 +5041,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (4 << 4) | 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5095,7 +5056,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (3 << 4) | 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5110,7 +5071,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (2 << 4) | 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5124,7 +5085,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (1 << 4) | 0,
                        htlc_minimum_msat: 100,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5182,7 +5143,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(85_000),
+                       htlc_maximum_msat: 85_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5195,7 +5156,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (4 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(270_000),
+                       htlc_maximum_msat: 270_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 1000000,
                        excess_data: Vec::new()
@@ -5247,7 +5208,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(80_000),
+                       htlc_maximum_msat: 80_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5259,7 +5220,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (4 << 4) | 1,
                        htlc_minimum_msat: 90_000,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5780,7 +5741,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (4 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(200_000_000),
+                       htlc_maximum_msat: 250_000_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5792,7 +5753,7 @@ mod tests {
                        flags: 0,
                        cltv_expiry_delta: (13 << 4) | 1,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(200_000_000),
+                       htlc_maximum_msat: 250_000_000,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new()
@@ -5801,8 +5762,8 @@ mod tests {
                let payment_params = PaymentParameters::from_node_id(nodes[2]).with_features(InvoiceFeatures::known());
                let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               // 150,000 sat is less than the available liquidity on each channel, set above.
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 150_000_000, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+               // 100,000 sats is less than the available liquidity on each channel, set above.
+               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000_000, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!((route.paths[0][1].short_channel_id == 4 && route.paths[1][1].short_channel_id == 13) ||
                        (route.paths[1][1].short_channel_id == 4 && route.paths[0][1].short_channel_id == 13));
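
The fixture changes above all apply the same migration: the router's `UnsignedChannelUpdate::htlc_maximum_msat` is now a plain `u64` rather than an `OptionalField<u64>`, so `OptionalField::Present(x)` becomes `x` and `OptionalField::Absent` becomes the `MAX_VALUE_MSAT` sentinel used by these tests. A minimal sketch of that mapping, using an `Option<u64>` as a stand-in for the removed `OptionalField` type (the helper name is illustrative, not part of the diff):

    // Collapse a previously optional HTLC maximum into the plain u64 now stored on
    // UnsignedChannelUpdate; "no maximum" maps to the sentinel the tests call MAX_VALUE_MSAT.
    fn htlc_max_msat(old: Option<u64>, max_value_msat: u64) -> u64 {
        old.unwrap_or(max_value_msat)
    }
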
index e2e0bdd18a6eddf714ce3288643a0efb1d4d8ca7..6ae339eae2bbc01776b840d655c2ef68ce6e83aa 100644 (file)
@@ -324,6 +324,9 @@ where L::Target: Logger {
 ///
 /// Used to configure base, liquidity, and amount penalties, the sum of which comprises the channel
 /// penalty (i.e., the amount in msats willing to be paid to avoid routing through the channel).
+///
+/// The penalty applied to any channel by the [`ProbabilisticScorer`] is the sum of each of the
+/// parameters here.
 #[derive(Clone)]
 pub struct ProbabilisticScoringParameters {
        /// A fixed penalty in msats to apply to each channel.
@@ -331,6 +334,20 @@ pub struct ProbabilisticScoringParameters {
        /// Default value: 500 msat
        pub base_penalty_msat: u64,
 
+       /// A multiplier used with the payment amount to calculate a fixed penalty applied to each
+       /// channel, in excess of the [`base_penalty_msat`].
+       ///
+       /// The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e.,
+       /// fees plus penalty) for large payments. The penalty is computed as the product of this
+       /// multiplier and `2^30`ths of the payment amount.
+       ///
+       /// i.e. `base_penalty_amount_multiplier_msat * amount_msat / 2^30`
+       ///
+       /// Default value: 8,192 msat
+       ///
+       /// [`base_penalty_msat`]: Self::base_penalty_msat
+       pub base_penalty_amount_multiplier_msat: u64,
+
        /// A multiplier used in conjunction with the negative `log10` of the channel's success
        /// probability for a payment to determine the liquidity penalty.
        ///
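
A sketch of the new `base_penalty_amount_multiplier_msat` penalty described above, mirroring the `BASE_AMOUNT_PENALTY_DIVISOR` computation added to `channel_penalty_msat` later in this diff (the free function is illustrative only). With the defaults introduced below (500 msat fixed base, 8,192 multiplier), a 1,000,000 msat payment adds 8,192 * 1,000,000 / 2^30, i.e. about 7 msat, on top of the fixed base.

    // base penalty = fixed component + amount-scaled component
    fn base_penalty_msat(base_msat: u64, base_amount_multiplier_msat: u64, amount_msat: u64) -> u64 {
        const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30;
        base_msat.saturating_add(
            base_amount_multiplier_msat.saturating_mul(amount_msat) / BASE_AMOUNT_PENALTY_DIVISOR)
    }
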
@@ -369,7 +386,7 @@ pub struct ProbabilisticScoringParameters {
        /// multiplier and `2^20`ths of the payment amount, weighted by the negative `log10` of the
        /// success probability.
        ///
-       /// `-log10(success_probability) * amount_penalty_multiplier_msat * amount_msat / 2^20`
+       /// `-log10(success_probability) * liquidity_penalty_amount_multiplier_msat * amount_msat / 2^20`
        ///
        /// In practice, this means for 0.1 success probability (`-log10(0.1) == 1`) each `2^20`th of
        /// the amount will result in a penalty of the multiplier. And, as the success probability
@@ -378,7 +395,7 @@ pub struct ProbabilisticScoringParameters {
        /// fall below `1`.
        ///
        /// Default value: 256 msat
-       pub amount_penalty_multiplier_msat: u64,
+       pub liquidity_penalty_amount_multiplier_msat: u64,
 
        /// Manual penalties used for the given nodes. Allows setting a particular penalty for a given
        /// node. Note that a manual penalty of `u64::max_value()` means the node would not ever be
@@ -399,7 +416,7 @@ pub struct ProbabilisticScoringParameters {
        /// current estimate of the channel's available liquidity.
        ///
        /// Note that in this case all other penalties, including the
-       /// [`liquidity_penalty_multiplier_msat`] and [`amount_penalty_multiplier_msat`]-based
+       /// [`liquidity_penalty_multiplier_msat`] and [`liquidity_penalty_amount_multiplier_msat`]-based
        /// penalties, as well as the [`base_penalty_msat`] and the [`anti_probing_penalty_msat`], if
        /// applicable, are still included in the overall penalty.
        ///
@@ -409,7 +426,7 @@ pub struct ProbabilisticScoringParameters {
        /// Default value: 1_0000_0000_000 msat (1 Bitcoin)
        ///
        /// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
-       /// [`amount_penalty_multiplier_msat`]: Self::amount_penalty_multiplier_msat
+       /// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
        /// [`base_penalty_msat`]: Self::base_penalty_msat
        /// [`anti_probing_penalty_msat`]: Self::anti_probing_penalty_msat
        pub considered_impossible_penalty_msat: u64,
@@ -536,9 +553,10 @@ impl ProbabilisticScoringParameters {
        fn zero_penalty() -> Self {
                Self {
                        base_penalty_msat: 0,
+                       base_penalty_amount_multiplier_msat: 0,
                        liquidity_penalty_multiplier_msat: 0,
                        liquidity_offset_half_life: Duration::from_secs(3600),
-                       amount_penalty_multiplier_msat: 0,
+                       liquidity_penalty_amount_multiplier_msat: 0,
                        manual_node_penalties: HashMap::new(),
                        anti_probing_penalty_msat: 0,
                        considered_impossible_penalty_msat: 0,
@@ -558,9 +576,10 @@ impl Default for ProbabilisticScoringParameters {
        fn default() -> Self {
                Self {
                        base_penalty_msat: 500,
+                       base_penalty_amount_multiplier_msat: 8192,
                        liquidity_penalty_multiplier_msat: 40_000,
                        liquidity_offset_half_life: Duration::from_secs(3600),
-                       amount_penalty_multiplier_msat: 256,
+                       liquidity_penalty_amount_multiplier_msat: 256,
                        manual_node_penalties: HashMap::new(),
                        anti_probing_penalty_msat: 250,
                        considered_impossible_penalty_msat: 1_0000_0000_000,
@@ -631,10 +650,11 @@ const PRECISION_LOWER_BOUND_DENOMINATOR: u64 = approx::LOWER_BITS_BOUND;
 
 /// The divisor used when computing the amount penalty.
 const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20;
+const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30;
 
 impl<L: Deref<Target = u64>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity<L, T, U> {
-       /// Returns a penalty for routing the given HTLC `amount_msat` through the channel in this
-       /// direction.
+       /// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
+       /// this direction.
        fn penalty_msat(&self, amount_msat: u64, params: &ProbabilisticScoringParameters) -> u64 {
                let max_liquidity_msat = self.max_liquidity_msat();
                let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat);
@@ -653,8 +673,8 @@ impl<L: Deref<Target = u64>, T: Time, U: Deref<Target = T>> DirectedChannelLiqui
                        if amount_msat - min_liquidity_msat < denominator / PRECISION_LOWER_BOUND_DENOMINATOR {
                                // If the failure probability is < 1.5625% (as 1 - numerator/denominator < 1/64),
                                // don't bother trying to use the log approximation as it gets too noisy to be
-                               // particularly helpful, instead just round down to 0 and return the base penalty.
-                               params.base_penalty_msat
+                               // particularly helpful, instead just round down to 0.
+                               0
                        } else {
                                let negative_log10_times_2048 =
                                        approx::negative_log10_times_2048(numerator, denominator);
@@ -663,7 +683,7 @@ impl<L: Deref<Target = u64>, T: Time, U: Deref<Target = T>> DirectedChannelLiqui
                }
        }
 
-       /// Computes the liquidity and amount penalties and adds them to the base penalty.
+       /// Computes the liquidity penalty from the penalty multipliers.
        #[inline(always)]
        fn combined_penalty_msat(
                &self, amount_msat: u64, negative_log10_times_2048: u64,
@@ -676,12 +696,10 @@ impl<L: Deref<Target = u64>, T: Time, U: Deref<Target = T>> DirectedChannelLiqui
                        (negative_log10_times_2048.saturating_mul(multiplier_msat) / 2048).min(max_penalty_msat)
                };
                let amount_penalty_msat = negative_log10_times_2048
-                       .saturating_mul(params.amount_penalty_multiplier_msat)
+                       .saturating_mul(params.liquidity_penalty_amount_multiplier_msat)
                        .saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR;
 
-               params.base_penalty_msat
-                       .saturating_add(liquidity_penalty_msat)
-                       .saturating_add(amount_penalty_msat)
+               liquidity_penalty_msat.saturating_add(amount_penalty_msat)
        }
 
        /// Returns the lower bound of the channel liquidity balance in this direction.
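
The renamed amount-scaled liquidity penalty can be sketched in isolation as below, assuming `negative_log10_times_2048` carries the scaled -log10(success probability) term computed by `penalty_msat` above; the arithmetic mirrors the `combined_penalty_msat` hunk just shown.

    // Amount-scaled liquidity penalty, using the renamed multiplier from this diff.
    fn liquidity_amount_penalty_msat(
        negative_log10_times_2048: u64,
        liquidity_penalty_amount_multiplier_msat: u64,
        amount_msat: u64,
    ) -> u64 {
        const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20;
        negative_log10_times_2048
            .saturating_mul(liquidity_penalty_amount_multiplier_msat)
            .saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR
    }
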
@@ -763,13 +781,17 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
                        return *penalty;
                }
 
+               let base_penalty_msat = self.params.base_penalty_msat.saturating_add(
+                       self.params.base_penalty_amount_multiplier_msat
+                               .saturating_mul(usage.amount_msat) / BASE_AMOUNT_PENALTY_DIVISOR);
+
                let mut anti_probing_penalty_msat = 0;
                match usage.effective_capacity {
                        EffectiveCapacity::ExactLiquidity { liquidity_msat } => {
                                if usage.amount_msat > liquidity_msat {
                                        return u64::max_value();
                                } else {
-                                       return self.params.base_penalty_msat;
+                                       return base_penalty_msat;
                                }
                        },
                        EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: Some(htlc_maximum_msat) } => {
@@ -790,6 +812,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
                        .as_directed(source, target, capacity_msat, liquidity_offset_half_life)
                        .penalty_msat(amount_msat, &self.params)
                        .saturating_add(anti_probing_penalty_msat)
+                       .saturating_add(base_penalty_msat)
        }
 
        fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
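
Putting the knobs together, a caller configuring the scorer after this change would set the renamed and added fields and leave the rest at their defaults. A minimal sketch, assuming the struct is exported as `lightning::routing::scoring::ProbabilisticScoringParameters` (the values are simply the defaults from this diff):

    use lightning::routing::scoring::ProbabilisticScoringParameters;

    // The four penalty knobs touched by this diff; all other fields keep their defaults.
    fn example_params() -> ProbabilisticScoringParameters {
        ProbabilisticScoringParameters {
            base_penalty_msat: 500,
            base_penalty_amount_multiplier_msat: 8192,
            liquidity_penalty_multiplier_msat: 40_000,
            liquidity_penalty_amount_multiplier_msat: 256,
            ..Default::default()
        }
    }
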
@@ -1258,7 +1281,7 @@ mod tests {
        use util::time::tests::SinceEpoch;
 
        use ln::features::{ChannelFeatures, NodeFeatures};
-       use ln::msgs::{ChannelAnnouncement, ChannelUpdate, OptionalField, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
+       use ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
        use routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
        use routing::router::RouteHop;
        use routing::scoring::{ChannelUsage, Score};
@@ -1385,7 +1408,7 @@ mod tests {
                        flags,
                        cltv_expiry_delta: 18,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Present(1_000),
+                       htlc_maximum_msat: 1_000,
                        fee_base_msat: 1,
                        fee_proportional_millionths: 0,
                        excess_data: Vec::new(),
@@ -2069,47 +2092,47 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: Some(1_000) },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 3613);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 4375);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1977);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2739);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1474);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2236);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1223);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1985);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 877);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1639);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 845);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1607);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
        }
 
        #[test]
@@ -2137,6 +2160,15 @@ mod tests {
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558);
+
+               let params = ProbabilisticScoringParameters {
+                       base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
+                       base_penalty_amount_multiplier_msat: (1 << 30),
+                       anti_probing_penalty_msat: 0, ..Default::default()
+               };
+
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558 + 128);
        }
 
        #[test]
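
The `558 + 128` expectation follows from the base penalty formula above: with `base_penalty_amount_multiplier_msat` set to `1 << 30`, the amount component reduces to `(1 << 30) * amount_msat / 2^30 = amount_msat`, so the additional 128 msat corresponds to a 128 msat usage amount configured earlier in this test (outside this hunk).
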
@@ -2153,7 +2185,7 @@ mod tests {
 
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
-                       amount_penalty_multiplier_msat: 0,
+                       liquidity_penalty_amount_multiplier_msat: 0,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
@@ -2161,7 +2193,7 @@ mod tests {
 
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
-                       amount_penalty_multiplier_msat: 256,
+                       liquidity_penalty_amount_multiplier_msat: 256,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
index 2282ea55ab36211576b4726645af7579179001f8..42b551633258448fc1a3117a0997913b30edffc4 100644 (file)
@@ -152,6 +152,50 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
        (12, OutdatedChannelManager) => {},
 );
 
+/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
+#[derive(Clone, Debug, PartialEq)]
+pub enum HTLCDestination {
+       /// We tried forwarding to a channel but failed to do so, for example because there is
+       /// insufficient capacity in our outbound channel.
+       NextHopChannel {
+               /// The `node_id` of the next node. For backwards compatibility, this field is
+               /// marked as optional, as versions prior to 0.0.110 may not always be able to
+               /// provide counterparty node information.
+               node_id: Option<PublicKey>,
+               /// The outgoing `channel_id` between us and the next node.
+               channel_id: [u8; 32],
+       },
+       /// Scenario where we are unsure of the next node to forward the HTLC to.
+       UnknownNextHop {
+               /// Short channel id we were requested to forward the HTLC to.
+               requested_forward_scid: u64,
+       },
+       /// Failure scenario where an HTLC was intended for us but is invalid for some reason,
+       /// so we reject it.
+       ///
+       /// Some of the reasons may include:
+       /// * The HTLC has timed out
+       /// * The expected amount to claim for an MPP payment does not equal the HTLC total
+       /// * The claimable amount does not match the expected amount
+       FailedPayment {
+               /// The payment hash of the payment we attempted to process.
+               payment_hash: PaymentHash
+       },
+}
+
+impl_writeable_tlv_based_enum_upgradable!(HTLCDestination,
+       (0, NextHopChannel) => {
+               (0, node_id, required),
+               (2, channel_id, required),
+       },
+       (2, UnknownNextHop) => {
+               (0, requested_forward_scid, required),
+       },
+       (4, FailedPayment) => {
+               (0, payment_hash, required),
+       }
+);
+
 /// An Event which you should probably take some action in response to.
 ///
 /// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use
@@ -540,6 +584,24 @@ pub enum Event {
                /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
                channel_type: ChannelTypeFeatures,
        },
+       /// Indicates that an HTLC was accepted, but could not be processed when or after attempting
+       /// to forward it.
+       ///
+       /// Some scenarios where this event may be sent include:
+       /// * Insufficient capacity in the outbound channel
+       /// * While waiting to forward the HTLC, the channel it is meant to be forwarded through closes
+       /// * An unknown SCID is requested for forwarding a payment
+       /// * Claiming an amount for an MPP payment that exceeds the HTLC total
+       /// * The HTLC has timed out
+       ///
+       /// This event, however, is not generated if an HTLC fails to meet the forwarding
+       /// requirements (e.g. insufficient fees were paid, or the CLTV expiry is too soon).
+       HTLCHandlingFailed {
+               /// The channel over which the HTLC was received.
+               prev_channel_id: [u8; 32],
+               /// Destination of the HTLC that failed to be processed.
+               failed_next_destination: HTLCDestination,
+       },
 }
 
 impl Writeable for Event {
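
A minimal sketch of how an event consumer might react to the new event, matching only on the variants introduced above. The import path is assumed to be the usual lightning::util::events; the printed messages are illustrative only.

    use lightning::util::events::{Event, HTLCDestination};

    fn handle_event(event: Event) {
        match event {
            Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination } => {
                match failed_next_destination {
                    HTLCDestination::NextHopChannel { node_id, channel_id } => {
                        // Forwarding failed, e.g. due to insufficient outbound capacity.
                        println!("Failed forwarding from {:?} to {:?} (peer {:?})",
                            prev_channel_id, channel_id, node_id);
                    },
                    HTLCDestination::UnknownNextHop { requested_forward_scid } => {
                        // We were asked to forward over an SCID we do not know about.
                        println!("Unknown next hop, scid {}", requested_forward_scid);
                    },
                    HTLCDestination::FailedPayment { payment_hash } => {
                        // The HTLC was for us but could not be accepted (e.g. it timed
                        // out, or the MPP total did not match).
                        println!("Rejected inbound payment {:?}", payment_hash);
                    },
                }
            },
            _ => {},
        }
    }
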
@@ -684,6 +746,13 @@ impl Writeable for Event {
                                        (6, short_channel_id, option),
                                })
                        },
+                       &Event::HTLCHandlingFailed { ref prev_channel_id, ref failed_next_destination } => {
+                               25u8.write(writer)?;
+                               write_tlv_fields!(writer, {
+                                       (0, prev_channel_id, required),
+                                       (2, failed_next_destination, required),
+                               })
+                       },
                        // Note that, going forward, all new events must only write data inside of
                        // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
                        // data via `write_tlv_fields`.
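
The new event is written with type 25, which is odd; per the comment above, readers from 0.0.101 onward skip odd-numbered events written via `write_tlv_fields` rather than failing, so older deserializers silently ignore HTLCHandlingFailed. A minimal sketch of that even/odd convention (illustrative only):

    fn older_reader_may_skip(event_type: u8) -> bool {
        // Odd types may be ignored by readers that do not know them; even types are not
        // covered by this rule.
        event_type % 2 == 1
    }

    fn main() {
        assert!(older_reader_may_skip(25));
    }
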
index 5b1a86a6a95f27a276f1dc221bd705f57fbf8ed4..ecf85839a5a8e19ebfed3f8e87b8b5832427e4a5 100644 (file)
@@ -244,6 +244,14 @@ pub(crate) trait LengthReadableArgs<P> where Self: Sized
        fn read<R: LengthRead>(reader: &mut R, params: P) -> Result<Self, DecodeError>;
 }
 
+/// A trait that various higher-level rust-lightning types implement allowing them to be read in
+/// from a Read whose total length is known up front.
+pub(crate) trait LengthReadable where Self: Sized
+{
+       /// Reads a Self in from the given LengthRead
+       fn read<R: LengthRead>(reader: &mut R) -> Result<Self, DecodeError>;
+}
+
 /// A trait that various rust-lightning types implement allowing them to (maybe) be read in from a Read
 ///
 /// (C-not exported) as we only export serialization to/from byte arrays instead
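
Self-contained sketch of the pattern the new pub(crate) trait enables: a reader that knows the total length of its data lets the deserializer size buffers up front. The trait definitions below are simplified stand-ins (in particular, the total_bytes accessor on LengthRead is an assumption, as that trait is not shown in this hunk).

    use std::io::{Cursor, Read};

    #[derive(Debug)]
    struct DecodeError;

    trait LengthRead: Read {
        fn total_bytes(&self) -> u64;
    }

    trait LengthReadable: Sized {
        fn read<R: LengthRead>(reader: &mut R) -> Result<Self, DecodeError>;
    }

    // A reader wrapper that reports its total length.
    struct SizedReader {
        len: u64,
        inner: Cursor<Vec<u8>>,
    }

    impl Read for SizedReader {
        fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
            self.inner.read(buf)
        }
    }

    impl LengthRead for SizedReader {
        fn total_bytes(&self) -> u64 {
            self.len
        }
    }

    // An opaque blob that consumes the whole stream, sized from total_bytes().
    struct OpaqueBlob(Vec<u8>);

    impl LengthReadable for OpaqueBlob {
        fn read<R: LengthRead>(reader: &mut R) -> Result<Self, DecodeError> {
            let mut buf = vec![0u8; reader.total_bytes() as usize];
            reader.read_exact(&mut buf).map_err(|_| DecodeError)?;
            Ok(OpaqueBlob(buf))
        }
    }

    fn main() {
        let data = vec![1u8, 2, 3];
        let mut reader = SizedReader { len: data.len() as u64, inner: Cursor::new(data) };
        let blob = OpaqueBlob::read(&mut reader).unwrap();
        assert_eq!(blob.0, vec![1, 2, 3]);
    }
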
index 4f3d800be5dd8479f79a7999318fc627017e7078..2f80fb73a419fa26aca6ab7255a8277d3b0797a6 100644 (file)
@@ -19,7 +19,6 @@ use chain::transaction::OutPoint;
 use chain::keysinterface;
 use ln::features::{ChannelFeatures, InitFeatures};
 use ln::{msgs, wire};
-use ln::msgs::OptionalField;
 use ln::script::ShutdownScript;
 use routing::scoring::FixedPenaltyScorer;
 use util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
@@ -36,6 +35,7 @@ use bitcoin::network::constants::Network;
 use bitcoin::hash_types::{BlockHash, Txid};
 
 use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, ecdsa::Signature};
+use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 
 use regex;
@@ -74,6 +74,7 @@ impl keysinterface::KeysInterface for OnlyReadsKeysInterface {
        type Signer = EnforcingSigner;
 
        fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> { unreachable!(); }
+       fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> { unreachable!(); }
        fn get_inbound_payment_key_material(&self) -> KeyMaterial { unreachable!(); }
        fn get_destination_script(&self) -> Script { unreachable!(); }
        fn get_shutdown_scriptpubkey(&self) -> ShutdownScript { unreachable!(); }
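
The new ecdh method lets a signer produce an ECDH shared secret against its node secret, optionally tweaked first. The sketch below is not the crate's KeysManager code: it is written against a recent secp256k1 API (Scalar / mul_tweak), and the tweak-by-scalar-multiplication semantics are an assumption.

    use bitcoin::secp256k1::ecdh::SharedSecret;
    use bitcoin::secp256k1::{PublicKey, Scalar, SecretKey};

    fn ecdh_sketch(
        node_secret: SecretKey,
        other_key: &PublicKey,
        tweak: Option<&[u8; 32]>,
    ) -> Result<SharedSecret, ()> {
        let secret = match tweak {
            Some(t) => {
                // Assumed semantics: multiply the node secret by the provided tweak.
                let scalar = Scalar::from_be_bytes(*t).map_err(|_| ())?;
                node_secret.mul_tweak(&scalar).map_err(|_| ())?
            },
            None => node_secret,
        };
        // ECDH between the (possibly tweaked) node secret and the counterparty key.
        Ok(SharedSecret::new(other_key, &secret))
    }
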
@@ -169,7 +170,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                update_res
        }
 
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
 }
@@ -407,7 +408,7 @@ fn get_dummy_channel_update(short_chan_id: u64) -> msgs::ChannelUpdate {
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
-                       htlc_maximum_msat: OptionalField::Absent,
+                       htlc_maximum_msat: msgs::MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: vec![],
@@ -599,6 +600,9 @@ impl keysinterface::KeysInterface for TestKeysInterface {
        fn get_node_secret(&self, recipient: Recipient) -> Result<SecretKey, ()> {
                self.backing.get_node_secret(recipient)
        }
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+               self.backing.ecdh(recipient, other_key, tweak)
+       }
        fn get_inbound_payment_key_material(&self) -> keysinterface::KeyMaterial {
                self.backing.get_inbound_payment_key_material()
        }