Merge pull request #1692 from TheBlueMatt/2022-08-time-goes-backwards
authorMatt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Sat, 3 Sep 2022 16:15:33 +0000 (16:15 +0000)
committerGitHub <noreply@github.com>
Sat, 3 Sep 2022 16:15:33 +0000 (16:15 +0000)
Handle monotonic clock going backwards during runtime

74 files changed:
.github/workflows/build.yml
.gitignore
CHANGELOG.md
CONTRIBUTING.md
README.md
fuzz/Cargo.toml
fuzz/ci-fuzz.sh
fuzz/src/bin/gen_target.sh
fuzz/src/bin/onion_message_target.rs [new file with mode: 0644]
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
fuzz/src/lib.rs
fuzz/src/onion_message.rs [new file with mode: 0644]
fuzz/src/process_network_graph.rs
fuzz/targets.h
lightning-background-processor/Cargo.toml
lightning-background-processor/src/lib.rs
lightning-block-sync/Cargo.toml
lightning-block-sync/src/convert.rs
lightning-block-sync/src/lib.rs
lightning-block-sync/src/test_utils.rs
lightning-invoice/Cargo.toml
lightning-invoice/fuzz/Cargo.toml
lightning-invoice/fuzz/ci-fuzz.sh
lightning-invoice/src/lib.rs
lightning-invoice/src/payment.rs
lightning-invoice/src/utils.rs
lightning-net-tokio/Cargo.toml
lightning-net-tokio/src/lib.rs
lightning-persister/Cargo.toml
lightning-persister/src/lib.rs
lightning-rapid-gossip-sync/Cargo.toml
lightning-rapid-gossip-sync/src/lib.rs
lightning/Cargo.toml
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/keysinterface.rs
lightning/src/chain/mod.rs
lightning/src/chain/onchaintx.rs
lightning/src/chain/package.rs
lightning/src/debug_sync.rs
lightning/src/lib.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/features.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/mod.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_utils.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/script.rs
lightning/src/ln/wire.rs
lightning/src/onion_message/blinded_route.rs [new file with mode: 0644]
lightning/src/onion_message/functional_tests.rs [new file with mode: 0644]
lightning/src/onion_message/messenger.rs [new file with mode: 0644]
lightning/src/onion_message/mod.rs [new file with mode: 0644]
lightning/src/onion_message/packet.rs [new file with mode: 0644]
lightning/src/onion_message/utils.rs [new file with mode: 0644]
lightning/src/routing/gossip.rs
lightning/src/routing/scoring.rs
lightning/src/util/chacha20poly1305rfc.rs
lightning/src/util/config.rs
lightning/src/util/events.rs
lightning/src/util/macro_logger.rs
lightning/src/util/ser.rs
lightning/src/util/ser_macros.rs
lightning/src/util/test_utils.rs
lightning/src/util/transaction_utils.rs

index cfb1b9c847420dc0cb4ee0edb9188033a738e26f..be52e05776cec547342fdae0eedf4777eb41bfd2 100644 (file)
@@ -62,6 +62,8 @@ jobs:
       - name: Pin tokio to 1.14 for Rust 1.45
         if: "matrix.build-net-old-tokio"
         run: cargo update -p tokio --precise "1.14.0" --verbose
+        env:
+          CARGO_NET_GIT_FETCH_WITH_CLI: "true"
       - name: Build on Rust ${{ matrix.toolchain }} with net-tokio
         if: "matrix.build-net-tokio && !matrix.coverage"
         run: cargo build --verbose --color always
index 10e905af5145fbae9e0c61afa82f3beeeeb9d0c9..d99c5a4ddf6603b32fb97d666d49c3ebb631df79 100644 (file)
@@ -8,4 +8,6 @@ lightning-c-bindings/a.out
 Cargo.lock
 .idea
 lightning/target
+lightning/ldk-net_graph-*.bin
 no-std-check/target
+
index c907952ce9184afdfac0b1f3f5a9ecbe9267f103..4618446397540439cd43357a794da30bcadcdf5d 100644 (file)
@@ -1,4 +1,4 @@
-# 0.0.110 - 2022-XXX
+# 0.0.110 - 2022-07-26
 
 ## API Updates
  * `ChannelManager::send_probe` and `Score::probe_{failed,successful}` have
index 93666dd22970d0bbeb2dedc206fa2f484d956586..f7cf8c4e699e3578285abbdbe3c72b8a0f06759b 100644 (file)
@@ -1,27 +1,35 @@
 Contributing to Rust-Lightning
 ==============================
 
-The Rust-Lightning project operates an open contributor model where anyone is
-welcome to contribute towards development in the form of peer review, documentation,
-testing and patches.
-
-Anyone is invited to contribute without regard to technical experience, "expertise", OSS
-experience, age, or other concern. However, the development of cryptocurrencies demands a
-high-level of rigor, adversarial thinking, thorough testing and risk-minimization.
-Any bug may cost users real money. That being said, we deeply welcome people contributing
-for the first time to an open source project or pick up Rust while contributing. Don't be shy,
-you'll learn.
-
-Communications Channels
+The `rust-lightning` project operates an open contributor model where anyone is
+welcome to contribute towards development in the form of peer review,
+documentation, testing and patches.
+
+Anyone is invited to contribute without regard to technical experience,
+"expertise", OSS experience, age, or other concern. However, the development of
+cryptocurrencies demands a high-level of rigor, adversarial thinking, thorough
+testing and risk-minimization. Any bug may cost users real money. That being
+said, we deeply welcome people contributing for the first time to an open source
+project or pick up Rust while contributing. Don't be shy, you'll learn.
+
+Communication Channels
 -----------------------
 
-Communication about Rust-Lightning happens primarily on #ldk-dev on the
-[LDK slack](http://www.lightningdevkit.org/), but also #rust-bitcoin on IRC Freenode.
+Communication about the development of LDK and `rust-lightning` happens
+primarily on the [LDK Discord](https://discord.gg/5AcknnMfBw) in the `#ldk-dev`
+channel. Additionally, live LDK devevelopment meetings are held every other
+Monday 19:00 UTC in the [LDK Dev Jitsi Meeting
+Room](https://meet.jit.si/ldkdevmeeting). Upcoming events can be found in the
+[LDK calendar](https://calendar.google.com/calendar/embed?src=c_e6fv6vlshbpoob2mmbvblkkoj4%40group.calendar.google.com).
+
+Contributors starting out with the Rust language are welcome to discuss and ask
+for help in the `#rust-101` channel on LDK Discord.
 
 Discussion about code base improvements happens in GitHub issues and on pull
 requests.
 
-Major projects are tracked [here](https://github.com/lightningdevkit/rust-lightning/projects).
+The LDK roadmap is tracked [here](https://github.com/orgs/lightningdevkit/projects/2).
+
 Major milestones are tracked [here](https://github.com/lightningdevkit/rust-lightning/milestones?direction=asc&sort=title&state=open).
 
 Getting Started
@@ -29,25 +37,28 @@ Getting Started
 
 First and foremost, start small.
 
-This doesn't mean don't be ambitious with the breadth and depth of your contributions but rather
-understand the project culture before investing an asymmetric number of hours on
-development compared to your merged work.
+This doesn't mean don't be ambitious with the breadth and depth of your
+contributions but rather understand the project culture before investing an
+asymmetric number of hours on development compared to your merged work.
 
 Browsing through the [meeting minutes](https://github.com/lightningdevkit/rust-lightning/wiki/Meetings)
-is a good first step. You will learn who is working on what, how releases are drafted, what are the
-pending tasks to deliver, where you can contribute review bandwidth, etc.
+is a good first step. You will learn who is working on what, how releases are
+drafted, what are the pending tasks to deliver, where you can contribute review
+bandwidth, etc.
 
-Even if you have an extensive open source background or sound software engineering skills, consider
-that the reviewers' comprehension of the code is as much important as technical correctness.
+Even if you have an extensive open source background or sound software
+engineering skills, consider that the reviewers' comprehension of the code is as
+much important as technical correctness.
 
-It's very welcome to ask for review, either on IRC or LDK Slack. And also for reviewers, it's nice
-to provide timelines when you hope to fulfill the request while bearing in mind for both sides that's
-a "soft" commitment.
+It's very welcome to ask for review on LDK Discord. And also for reviewers, it's
+nice to provide timelines when you hope to fulfill the request while bearing in
+mind for both sides that's a "soft" commitment.
 
-If you're eager to increase the velocity of the dev process, reviewing other contributors work is
-the best you can do while waiting review on yours.
+If you're eager to increase the velocity of the dev process, reviewing other
+contributors work is the best you can do while waiting review on yours.
 
-Also, getting familiar with the [glossary](GLOSSARY.md) will streamline discussions with regular contributors.
+Also, getting familiar with the [glossary](GLOSSARY.md) will streamline
+discussions with regular contributors.
 
 Contribution Workflow
 ---------------------
@@ -80,14 +91,14 @@ our GitHub Actions). Also, the compatibility for LDK object serialization is
 currently ensured back to and including crate version 0.0.99 (see the
 [changelog](CHANGELOG.md)).
 
-Commits should cover both the issue fixed and the solution's rationale.
-These [guidelines](https://chris.beams.io/posts/git-commit/) should be kept in mind.
+Commits should cover both the issue fixed and the solution's rationale. These
+[guidelines](https://chris.beams.io/posts/git-commit/) should be kept in mind.
 
-To facilitate communication with other contributors, the project is making use of
-GitHub's "assignee" field. First check that no one is assigned and then comment
-suggesting that you're working on it. If someone is already assigned, don't hesitate
-to ask if the assigned party or previous commenters are still working on it if it has
-been awhile.
+To facilitate communication with other contributors, the project is making use
+of GitHub's "assignee" field. First check that no one is assigned and then
+comment suggesting that you're working on it. If someone is already assigned,
+don't hesitate to ask if the assigned party or previous commenters are still
+working on it if it has been awhile.
 
 Peer review
 -----------
@@ -95,8 +106,8 @@ Peer review
 Anyone may participate in peer review which is expressed by comments in the pull
 request. Typically reviewers will review the code for obvious errors, as well as
 test out the patch set and opine on the technical merits of the patch. PR should
-be reviewed first on the conceptual level before focusing on code style or grammar
-fixes.
+be reviewed first on the conceptual level before focusing on code style or
+grammar fixes.
 
 Coding Conventions
 ------------------
@@ -104,65 +115,70 @@ Coding Conventions
 Use tabs. If you want to align lines, use spaces. Any desired alignment should
 display fine at any tab-length display setting.
 
-Our CI enforces [clippy's](https://github.com/rust-lang/rust-clippy) default linting
-[settings](https://rust-lang.github.io/rust-clippy/rust-1.39.0/index.html).
-This includes all lint groups except for nursery, pedantic, and cargo in addition to allowing the following lints:
-`erasing_op`, `never_loop`, `if_same_then_else`.
+Our CI enforces [clippy's](https://github.com/rust-lang/rust-clippy) default
+linting
+[settings](https://rust-lang.github.io/rust-clippy/rust-1.39.0/index.html). This
+includes all lint groups except for nursery, pedantic, and cargo in addition to
+allowing the following lints: `erasing_op`, `never_loop`, `if_same_then_else`.
 
-If you use rustup, feel free to lint locally, otherwise you can just push to CI for automated linting.
+If you use rustup, feel free to lint locally, otherwise you can just push to CI
+for automated linting.
 
 ```bash
 rustup component add clippy
 cargo clippy
 ```
 
-Significant structures that users persist should always have their serialization methods (usually
-`Writeable::write` and `ReadableArgs::read`) begin with
+Significant structures that users persist should always have their serialization
+methods (usually `Writeable::write` and `ReadableArgs::read`) begin with
 `write_ver_prefix!()`/`read_ver_prefix!()` calls, and end with calls to
 `write_tlv_fields!()`/`read_tlv_fields!()`.
 
-Updates to the serialized format which has implications for backwards or forwards compatibility
-must be included in release notes.
+Updates to the serialized format which has implications for backwards or
+forwards compatibility must be included in release notes.
 
 Security
 --------
 
-Security is the primary focus of Rust-Lightning; disclosure of security vulnerabilites
-helps prevent user loss of funds. If you believe a vulnerability may affect other Lightning
-implementations, please inform them.
+Security is the primary focus of `rust-lightning`; disclosure of security
+vulnerabilites helps prevent user loss of funds. If you believe a vulnerability
+may affect other Lightning implementations, please inform them.
 
-Note that Rust-Lightning is currently considered "pre-production" during this time, there
-is no special handling of security issues. Please simply open an issue on Github.
+You can find further information on submitting (possible) vulnerabilities in the
+[security policy](SECURITY.md).
 
 Testing
 -------
 
-Related to the security aspect, Rust-Lightning developers take testing
-very seriously. Due to the modular nature of the project, writing new functional
-tests is easy and good test coverage of the codebase is an important goal. Refactoring
-the project to enable fine-grained unit testing is also an ongoing effort.
+Related to the security aspect, `rust-lightning` developers take testing very
+seriously. Due to the modular nature of the project, writing new functional
+tests is easy and good test coverage of the codebase is an important goal.
+Refactoring the project to enable fine-grained unit testing is also an ongoing
+effort.
 
 Fuzzing is heavily encouraged: you will find all related material under `fuzz/`
 
-Mutation testing is work-in-progress; any contribution there would be warmly welcomed.
+Mutation testing is work-in-progress; any contribution there would be warmly
+welcomed.
 
 C/C++ Bindings
 --------------
 
-You can learn more about the C/C++ bindings that are made available by reading the
-[C/C++ Bindings README](lightning-c-bindings/README.md). If you are not using the C/C++ bindings,
-you likely don't need to worry about them, and during their early experimental phase we are not
-requiring that pull requests keep the bindings up to date (and, thus, pass the bindings_check CI
-run). If you wish to ensure your PR passes the bindings generation phase, you should run the
-`genbindings.sh` script in the top of the directory tree to generate, build, and test C bindings on
-your local system.
+You can learn more about the C/C++ bindings that are made available by reading
+the [C/C++ Bindings README](https://github.com/lightningdevkit/ldk-c-bindings/blob/main/lightning-c-bindings/README.md).
+If you are not using the C/C++ bindings, you likely don't need to worry about
+them, and during their early experimental phase we are not requiring that pull
+requests keep the bindings up to date (and, thus, pass the `bindings_check` CI
+run). If you wish to ensure your PR passes the bindings generation phase, you
+should run the `genbindings.sh` script in the top of the directory tree to
+generate, build, and test C bindings on your local system.
 
 Going further
 -------------
 
-You may be interested by Jon Atack guide on [How to review Bitcoin Core PRs](https://github.com/jonatack/bitcoin-development/blob/master/how-to-review-bitcoin-core-prs.md)
+You may be interested by Jon Atack's guide on [How to review Bitcoin Core PRs](https://github.com/jonatack/bitcoin-development/blob/master/how-to-review-bitcoin-core-prs.md)
 and [How to make Bitcoin Core PRs](https://github.com/jonatack/bitcoin-development/blob/master/how-to-make-bitcoin-core-prs.md).
-While there are differences between the projects in terms of context and maturity, many
-of the suggestions offered apply to this project.
+While there are differences between the projects in terms of context and
+maturity, many of the suggestions offered apply to this project.
 
 Overall, have fun :)
index 3c34335c244af9d04734f2be010d424b06c498fd..fea9c35dc0ec3282be2545b43b36af62535de986 100644 (file)
--- a/README.md
+++ b/README.md
@@ -5,74 +5,73 @@ Rust-Lightning
 [![Documentation](https://img.shields.io/static/v1?logo=read-the-docs&label=docs.rs&message=lightning&color=informational)](https://docs.rs/lightning/)
 [![Safety Dance](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/)
 
-Rust-Lightning is a Bitcoin Lightning library written in Rust. The main crate,
+`rust-lightning` is a Bitcoin Lightning library written in Rust. The main crate,
 `lightning`, does not handle networking, persistence, or any other I/O. Thus,
 it is runtime-agnostic, but users must implement basic networking logic, chain
 interactions, and disk storage. More information is available in the `About`
 section.
 
-The `lightning-net-tokio` crate implements Lightning networking using the
-[Tokio](https://github.com/tokio-rs/tokio) async runtime.
-
-The `lightning-persister` crate implements persistence for channel data that
-is crucial to avoiding loss of channel funds. Sample modules for persistence of
-other Rust-Lightning data is coming soon.
-
 Status
 ------
-
-The project implements all of the BOLT specifications in the 1.0 spec. The
+The project implements all of the [BOLT
+specifications](https://github.com/lightning/bolts). The
 implementation has pretty good test coverage that is expected to continue to
 improve. It is also anticipated that as developers begin using the API, the
 lessons from that will result in changes to the API, so any developer using this
 API at this stage should be prepared to embrace that. The current state is
-sufficient for a developer or project to experiment with it. Recent increased
-contribution rate to the project is expected to lead to a high quality, stable,
-production-worthy implementation in 2021.
+sufficient for a developer or project to experiment with it.
 
-Communications for Rust-Lightning and Lightning Development Kit happens through our LDK
-[slack](https://join.slack.com/t/lightningdevkit/shared_invite/zt-tte36cb7-r5f41MDn3ObFtDu~N9dCrQ) & [discord](https://discord.gg/5AcknnMfBw) channels.
+Communications for `rust-lightning` and Lightning Development Kit happen through
+our LDK [Discord](https://discord.gg/5AcknnMfBw) channels.
 
 Crates
 -----------
-1. [lightning](./lightning)   
-  The Core of the LDK library, implements the lightning protocol, channel state machine, 
-  and on-chain logic. Supports no-std and exposes on relatively low-level interfaces.
+1. [lightning](./lightning)
+  The core of the LDK library, implements the Lightning protocol, channel state machine,
+  and on-chain logic. Supports `no-std` and exposes only relatively low-level interfaces.
 2. [lightning-background-processor](./lightning-background-processor)
   Utilities to perform required background tasks for Rust Lightning.
 3. [lightning-block-sync](./lightning-block-sync)
   Utilities to fetch the chain data from a block source and feed them into Rust Lightning.
 4. [lightning-invoice](./lightning-invoice)
-  Data structures to parse and serialize BOLT11 lightning invoices.
+  Data structures to parse and serialize
+  [BOLT #11](https://github.com/lightning/bolts/blob/master/11-payment-encoding.md)
+  Lightning invoices.
 5. [lightning-net-tokio](./lightning-net-tokio)
-  Implementation of the rust-lightning network stack using Tokio.
-  For Rust-Lightning clients which wish to make direct connections to Lightning P2P nodes,
-  this is a simple alternative to implementing the required network stack, especially for those already using Tokio.
+  Implementation of the `rust-lightning` network stack using the
+  [Tokio](https://github.com/tokio-rs/tokio) `async` runtime. For `rust-lightning`
+  clients which wish to make direct connections to Lightning P2P nodes, this is
+  a simple alternative to implementing the required network stack, especially
+  for those already using Tokio.
 6. [lightning-persister](./lightning-persister)
-  Utilities to manage Rust-Lightning channel data persistence and retrieval.
+  Implements utilities to manage `rust-lightning` channel data persistence and retrieval.
+  Persisting channel data is crucial to avoiding loss of channel funds.
 7. [lightning-rapid-gossip-sync](./lightning-rapid-gossip-sync)
   Client for rapid gossip graph syncing, aimed primarily at mobile clients.
 
 About
 -----------
-LDK/Rust-Lightning is a generic library which allows you to build a lightning
-node without needing to worry about getting all of the lightning state machine,
+LDK/`rust-lightning` is a generic library which allows you to build a Lightning
+node without needing to worry about getting all of the Lightning state machine,
 routing, and on-chain punishment code (and other chain interactions) exactly
-correct. Note that Rust-Lightning isn't, in itself, a node. There are various
+correct. Note that `rust-lightning` isn't, in itself, a node. There are various
 working/in progress demos which could be used as a node today, but if you "just"
-want a generic lightning node, you're almost certainly better off with
-`c-lightning`/`lnd` - if, on the other hand, you want to integrate lightning
-with custom features such as your own chain sync, your own key management, your
-own data storage/backup logic, etc., LDK is likely your only option. Some
-Rust-Lightning utilities such as those in `chan_utils` are also suitable for use
-in non-LN Bitcoin applications such as DLCs and bulletin boards.
-
-We are currently working on a demo node which fetches blockchain data and
-on-chain funds via Bitcoin Core RPC/REST. The individual pieces of that demo
-are/will be composable, so you can pick the off-the-shelf parts you want and
-replace the rest.
-
-In general, Rust-Lightning does not provide (but LDK has implementations of):
+want a generic Lightning node, you're almost certainly better off with [Core
+Lightning](https://github.com/ElementsProject/lightning) or
+[LND](https://github.com/lightningnetwork/lnd). If, on the other hand, you want
+to integrate Lightning with custom features such as your own chain sync, your
+own key management, your own data storage/backup logic, etc., LDK is likely your
+only option. Some `rust-lightning` utilities such as those in
+[`chan_utils`](./lightning/src/ln/chan_utils.rs) are also suitable for use in
+non-LN Bitcoin applications such as Discreet Log Contracts (DLCs) and bulletin boards.
+
+A sample node which fetches blockchain data and manages on-chain funds via the
+Bitcoin Core RPC/REST interface is available
+[here](https://github.com/lightningdevkit/ldk-sample/). The individual pieces of
+that demo are composable, so you can pick the off-the-shelf parts you want
+and replace the rest.
+
+In general, `rust-lightning` does not provide (but LDK has implementations of):
 * on-disk storage - you can store the channel state any way you want - whether
   Google Drive/iCloud, a local disk, any key-value store/database/a remote
   server, or any combination of them - we provide a clean API that provides
@@ -84,21 +83,21 @@ In general, Rust-Lightning does not provide (but LDK has implementations of):
   informed of, which is compatible with Electrum server requests/neutrino
   filtering/etc.
 * UTXO management - RL/LDK owns on-chain funds as long as they are claimable as
-  a part of a lightning output which can be contested - once a channel is closed
+  part of a Lightning output which can be contested - once a channel is closed
   and all on-chain outputs are spendable only by the user, we provide users
   notifications that a UTXO is "theirs" again and it is up to them to spend it
   as they wish. Additionally, channel funding is accomplished with a generic API
   which notifies users of the output which needs to appear on-chain, which they
   can then create a transaction for. Once a transaction is created, we handle
   the rest. This is a large part of our API's goals - making it easier to
-  integrate lightning into existing on-chain wallets which have their own
+  integrate Lightning into existing on-chain wallets which have their own
   on-chain logic - without needing to move funds in and out of a separate
-  lightning wallet with on-chain transactions and a separate private key system.
-* networking - to enable a user to run a full lightning node on an embedded
+  Lightning wallet with on-chain transactions and a separate private key system.
+* networking - to enable a user to run a full Lightning node on an embedded
   machine, we don't specify exactly how to connect to another node at all! We
   provide a default implementation which uses TCP sockets, but, e.g., if you
-  wanted to run your full lightning node on a hardware wallet, you could, by
-  piping the lightning network messages over USB/serial and then sending them in
+  wanted to run your full Lightning node on a hardware wallet, you could, by
+  piping the Lightning network messages over USB/serial and then sending them in
   a TCP socket from another machine.
 * private keys - again we have "default implementations", but users can chose to
   provide private keys to RL/LDK in any way they wish following a simple API. We
@@ -111,25 +110,24 @@ https://vimeo.com/showcase/8372504/video/412818125
 
 Design Goal
 -----------
-
-The goal is to provide a full-featured but also incredibly flexible lightning
+The goal is to provide a full-featured but also incredibly flexible Lightning
 implementation, allowing the user to decide how they wish to use it. With that
 in mind, everything should be exposed via simple, composable APIs. More
-information about Rust-Lightning's flexibility is provided in the `About`
+information about `rust-lightning`'s flexibility is provided in the `About`
 section above.
 
 For security reasons, do not add new dependencies. Really do not add new
 non-optional/non-test/non-library dependencies. Really really do not add
 dependencies with dependencies. Do convince Andrew to cut down dependency usage
-in rust-bitcoin.
+in `rust-bitcoin`.
 
 Rust-Lightning vs. LDK (Lightning Development Kit)
 -------------
-Rust-Lightning refers to the core `lightning` crate within this repo, whereas
-LDK encompasses Rust-Lightning and all of its sample modules and crates (e.g.
+`rust-lightning` refers to the core `lightning` crate within this repo, whereas
+LDK encompasses `rust-lightning` and all of its sample modules and crates (e.g.
 the `lightning-persister` crate), language bindings, sample node
-implementation(s), and other tools built around using Rust-Lightning for
-lightning integration or building a lightning node.
+implementation(s), and other tools built around using `rust-lightning` for
+Lightning integration or building a Lightning node.
 
 Tagline
 -------
@@ -144,7 +142,7 @@ Contributors are warmly welcome, see [CONTRIBUTING.md](CONTRIBUTING.md).
 Project Architecture
 ---------------------
 
-For a Rust-Lightning high-level API introduction, see [ARCH.md](ARCH.md).
+For a `rust-lightning` high-level API introduction, see [ARCH.md](ARCH.md).
 
 License is either Apache-2.0 or MIT, at the option of the user (ie dual-license
 Apache-2.0 and MIT).
index 66dabcfe4be3242eac2240a287476745c8f03e2a..95308e1bf5c3ae2f81db847b08abcc2690796c7c 100644 (file)
@@ -20,10 +20,10 @@ stdin_fuzz = []
 afl = { version = "0.4", optional = true }
 lightning = { path = "../lightning", features = ["regex"] }
 lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
-bitcoin = { version = "0.28.1", features = ["secp-lowmemory"] }
+bitcoin = { version = "0.29.0", features = ["secp-lowmemory"] }
 hex = "0.3"
-honggfuzz = { version = "0.5", optional = true }
-libfuzzer-sys = { git = "https://github.com/rust-fuzz/libfuzzer-sys.git", optional = true }
+honggfuzz = { version = "0.5", optional = true, default-features = false }
+libfuzzer-sys = { version = "0.4", optional = true }
 
 [build-dependencies]
 cc = "1.0"
index 6f0074b6a1a0d44ea889d8759fd3d7fde037e950..969505ca88d1bf1e227fd1eeee73c26c13dea741 100755 (executable)
@@ -13,7 +13,7 @@ rm *_target.rs
 [ "$(git diff)" != "" ] && exit 1
 popd
 
-cargo install --color always --force honggfuzz
+cargo install --color always --force honggfuzz --no-default-features
 sed -i 's/lto = true//' Cargo.toml
 HFUZZ_BUILD_ARGS="--features honggfuzz_fuzz" cargo --color always hfuzz build
 for TARGET in src/bin/*.rs; do
index c0daa5a3a0f36abfb9b898438a0641135302c770..95e65695eb868c798aba81b1d3684ba870154521 100755 (executable)
@@ -9,6 +9,7 @@ GEN_TEST() {
 GEN_TEST chanmon_deser
 GEN_TEST chanmon_consistency
 GEN_TEST full_stack
+GEN_TEST onion_message
 GEN_TEST peer_crypt
 GEN_TEST process_network_graph
 GEN_TEST router
diff --git a/fuzz/src/bin/onion_message_target.rs b/fuzz/src/bin/onion_message_target.rs
new file mode 100644 (file)
index 0000000..e9bcf59
--- /dev/null
@@ -0,0 +1,113 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::onion_message::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+       fuzz!(|data| {
+               onion_message_run(data.as_ptr(), data.len());
+       });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+       loop {
+               fuzz!(|data| {
+                       onion_message_run(data.as_ptr(), data.len());
+               });
+       }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+       onion_message_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+       use std::io::Read;
+
+       let mut data = Vec::with_capacity(8192);
+       std::io::stdin().read_to_end(&mut data).unwrap();
+       onion_message_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+       use std::fs;
+       use std::io::Read;
+       use lightning_fuzz::utils::test_logger::StringBuffer;
+
+       use std::sync::{atomic, Arc};
+       {
+               let data: Vec<u8> = vec![0];
+               onion_message_run(data.as_ptr(), data.len());
+       }
+       let mut threads = Vec::new();
+       let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+       if let Ok(tests) = fs::read_dir("test_cases/onion_message") {
+               for test in tests {
+                       let mut data: Vec<u8> = Vec::new();
+                       let path = test.unwrap().path();
+                       fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+                       threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+                       let thread_count_ref = Arc::clone(&threads_running);
+                       let main_thread_ref = std::thread::current();
+                       threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+                               std::thread::spawn(move || {
+                                       let string_logger = StringBuffer::new();
+
+                                       let panic_logger = string_logger.clone();
+                                       let res = if ::std::panic::catch_unwind(move || {
+                                               onion_message_test(&data, panic_logger);
+                                       }).is_err() {
+                                               Some(string_logger.into_string())
+                                       } else { None };
+                                       thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+                                       main_thread_ref.unpark();
+                                       res
+                               })
+                       ));
+                       while threads_running.load(atomic::Ordering::Acquire) > 32 {
+                               std::thread::park();
+                       }
+               }
+       }
+       let mut failed_outputs = Vec::new();
+       for (test, thread) in threads.drain(..) {
+               if let Some(output) = thread.join().unwrap() {
+                       println!("\nOutput of {}:\n{}\n", test, output);
+                       failed_outputs.push(test);
+               }
+       }
+       if !failed_outputs.is_empty() {
+               println!("Test cases which failed: ");
+               for case in failed_outputs {
+                       println!("{}", case);
+               }
+               panic!();
+       }
+}
index f384d21927e91baf377a292e0a7c043ec904d366..372bed6049370c065c0d7a660f61a466ec715bd4 100644 (file)
 //! send-side handling is correct, other peers. We consider it a failure if any action results in a
 //! channel being force-closed.
 
+use bitcoin::TxMerkleNode;
 use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
+use bitcoin::blockdata::locktime::PackedLockTime;
 use bitcoin::network::constants::Network;
 
 use bitcoin::hashes::Hash as TraitImport;
@@ -53,7 +55,8 @@ use lightning::routing::router::{Route, RouteHop};
 use utils::test_logger::{self, Output};
 use utils::test_persister::TestPersister;
 
-use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey, SecretKey, Scalar};
+use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 use bitcoin::secp256k1::Secp256k1;
 
@@ -165,6 +168,14 @@ impl KeysInterface for KeyProvider {
                Ok(SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap())
        }
 
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+               let mut node_secret = self.get_node_secret(recipient)?;
+               if let Some(tweak) = tweak {
+                       node_secret = node_secret.mul_tweak(tweak).unwrap();
+               }
+               Ok(SharedSecret::new(other_key, &node_secret))
+       }
+
        fn get_inbound_payment_key_material(&self) -> KeyMaterial {
                KeyMaterial([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id])
        }
@@ -438,7 +449,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                let events = $source.get_and_clear_pending_events();
                                assert_eq!(events.len(), 1);
                                if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
-                                       let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+                                       let tx = Transaction { version: $chan_id, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
                                                value: *channel_value_satoshis, script_pubkey: output_script.clone(),
                                        }]};
                                        funding_output = OutPoint { txid: tx.txid(), index: 0 };
@@ -472,11 +483,11 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
        macro_rules! confirm_txn {
                ($node: expr) => { {
                        let chain_hash = genesis_block(Network::Bitcoin).block_hash();
-                       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+                       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
                        let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
                        $node.transactions_confirmed(&header, &txdata, 1);
                        for _ in 2..100 {
-                               header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+                               header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
                        }
                        $node.best_block_updated(&header, 99);
                } }
index fa0211aa280bc0794fa37f67d53332b1793d3129..f506acc9fe88123ab74ea3fb6e986b73dc52efbb 100644 (file)
 //! or payments to send/ways to handle events generated.
 //! This test has been very useful, though due to its complexity good starting inputs are critical.
 
+use bitcoin::TxMerkleNode;
 use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
+use bitcoin::blockdata::locktime::PackedLockTime;
 use bitcoin::consensus::encode::deserialize;
 use bitcoin::network::constants::Network;
-use bitcoin::blockdata::constants::genesis_block;
 
 use bitcoin::hashes::Hash as TraitImport;
 use bitcoin::hashes::HashEngine as TraitImportEngine;
@@ -50,7 +52,8 @@ use lightning::util::ser::ReadableArgs;
 use utils::test_logger;
 use utils::test_persister::TestPersister;
 
-use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey, SecretKey, Scalar};
+use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 use bitcoin::secp256k1::Secp256k1;
 
@@ -163,7 +166,7 @@ type ChannelMan = ChannelManager<
        EnforcingSigner,
        Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>;
-type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<dyn Logger>>>, Arc<dyn chain::Access>, Arc<dyn Logger>>>, Arc<dyn Logger>, IgnoringMessageHandler>;
+type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<dyn Logger>>>, Arc<dyn chain::Access>, Arc<dyn Logger>>>, IgnoringMessageHandler, Arc<dyn Logger>, IgnoringMessageHandler>;
 
 struct MoneyLossDetector<'a> {
        manager: Arc<ChannelMan>,
@@ -212,7 +215,7 @@ impl<'a> MoneyLossDetector<'a> {
                }
 
                self.blocks_connected += 1;
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: Default::default(), time: self.blocks_connected, bits: 42, nonce: 42 };
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: TxMerkleNode::all_zeros(), time: self.blocks_connected, bits: 42, nonce: 42 };
                self.height += 1;
                self.manager.transactions_confirmed(&header, &txdata, self.height as u32);
                self.manager.best_block_updated(&header, self.height as u32);
@@ -229,7 +232,7 @@ impl<'a> MoneyLossDetector<'a> {
 
        fn disconnect_block(&mut self) {
                if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
-                       let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: Default::default(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 };
+                       let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: TxMerkleNode::all_zeros(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 };
                        self.manager.block_disconnected(&header, self.height as u32);
                        self.monitor.block_disconnected(&header, self.height as u32);
                        self.height -= 1;
@@ -269,6 +272,14 @@ impl KeysInterface for KeyProvider {
                Ok(self.node_secret.clone())
        }
 
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+               let mut node_secret = self.get_node_secret(recipient)?;
+               if let Some(tweak) = tweak {
+                       node_secret = node_secret.mul_tweak(tweak).unwrap();
+               }
+               Ok(SharedSecret::new(other_key, &node_secret))
+       }
+
        fn get_inbound_payment_key_material(&self) -> KeyMaterial {
                self.inbound_payment_key.clone()
        }
@@ -403,6 +414,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
        let mut loss_detector = MoneyLossDetector::new(&peers, channelmanager.clone(), monitor.clone(), PeerManager::new(MessageHandler {
                chan_handler: channelmanager.clone(),
                route_handler: gossip_sync.clone(),
+               onion_message_handler: IgnoringMessageHandler {},
        }, our_network_key, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0], Arc::clone(&logger), IgnoringMessageHandler{}));
 
        let mut should_forward = false;
@@ -555,7 +567,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                        },
                        10 => {
                                'outer_loop: for funding_generation in pending_funding_generation.drain(..) {
-                                       let mut tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+                                       let mut tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
                                                        value: funding_generation.2, script_pubkey: funding_generation.3,
                                                }] };
                                        let funding_output = 'search_loop: loop {
index 5e158aee36ffe050e6f53176ddd1fc7887935871..2238a9702a9563a78e59ef8010f6e816f282b4b1 100644 (file)
@@ -17,6 +17,7 @@ pub mod utils;
 pub mod chanmon_deser;
 pub mod chanmon_consistency;
 pub mod full_stack;
+pub mod onion_message;
 pub mod peer_crypt;
 pub mod process_network_graph;
 pub mod router;
diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs
new file mode 100644 (file)
index 0000000..a2fe88a
--- /dev/null
@@ -0,0 +1,152 @@
+// Imports that need to be added manually
+use bitcoin::bech32::u5;
+use bitcoin::blockdata::script::Script;
+use bitcoin::secp256k1::{PublicKey, Scalar, SecretKey};
+use bitcoin::secp256k1::ecdh::SharedSecret;
+use bitcoin::secp256k1::ecdsa::RecoverableSignature;
+
+use lightning::chain::keysinterface::{Recipient, KeyMaterial, KeysInterface};
+use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler};
+use lightning::ln::script::ShutdownScript;
+use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::logger::Logger;
+use lightning::util::ser::{Readable, Writer};
+use lightning::onion_message::OnionMessenger;
+
+use utils::test_logger;
+
+use std::io::Cursor;
+use std::sync::atomic::{AtomicU64, Ordering};
+
+#[inline]
+/// Actual fuzz test, method signature and name are fixed
+pub fn do_test<L: Logger>(data: &[u8], logger: &L) {
+       if let Ok(msg) = <msgs::OnionMessage as Readable>::read(&mut Cursor::new(data)) {
+               let mut secret_bytes = [0; 32];
+               secret_bytes[31] = 2;
+               let secret = SecretKey::from_slice(&secret_bytes).unwrap();
+               let keys_manager = KeyProvider {
+                       node_secret: secret,
+                       counter: AtomicU64::new(0),
+               };
+               let onion_messenger = OnionMessenger::new(&keys_manager, logger);
+               let mut pk = [2; 33]; pk[1] = 0xff;
+               let peer_node_id_not_used = PublicKey::from_slice(&pk).unwrap();
+               onion_messenger.handle_onion_message(&peer_node_id_not_used, &msg);
+       }
+}
+
+/// Method that needs to be added manually, {name}_test
+pub fn onion_message_test<Out: test_logger::Output>(data: &[u8], out: Out) {
+       let logger = test_logger::TestLogger::new("".to_owned(), out);
+       do_test(data, &logger);
+}
+
+/// Method that needs to be added manually, {name}_run
+#[no_mangle]
+pub extern "C" fn onion_message_run(data: *const u8, datalen: usize) {
+       let logger = test_logger::TestLogger::new("".to_owned(), test_logger::DevNull {});
+       do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, &logger);
+}
+
+pub struct VecWriter(pub Vec<u8>);
+impl Writer for VecWriter {
+       fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+               self.0.extend_from_slice(buf);
+               Ok(())
+       }
+}
+struct KeyProvider {
+       node_secret: SecretKey,
+       counter: AtomicU64,
+}
+impl KeysInterface for KeyProvider {
+       type Signer = EnforcingSigner;
+
+       fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> {
+               Ok(self.node_secret.clone())
+       }
+
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+               let mut node_secret = self.get_node_secret(recipient)?;
+               if let Some(tweak) = tweak {
+                       node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
+               }
+               Ok(SharedSecret::new(other_key, &node_secret))
+       }
+
+       fn get_inbound_payment_key_material(&self) -> KeyMaterial { unreachable!() }
+
+       fn get_destination_script(&self) -> Script { unreachable!() }
+
+       fn get_shutdown_scriptpubkey(&self) -> ShutdownScript { unreachable!() }
+
+       fn get_channel_signer(&self, _inbound: bool, _channel_value_satoshis: u64) -> EnforcingSigner {
+               unreachable!()
+       }
+
+       fn get_secure_random_bytes(&self) -> [u8; 32] {
+               let ctr = self.counter.fetch_add(1, Ordering::Relaxed);
+               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+               (ctr >> 8*7) as u8, (ctr >> 8*6) as u8, (ctr >> 8*5) as u8, (ctr >> 8*4) as u8, (ctr >> 8*3) as u8, (ctr >> 8*2) as u8, (ctr >> 8*1) as u8, 14, (ctr >> 8*0) as u8]
+       }
+
+       fn read_chan_signer(&self, _data: &[u8]) -> Result<EnforcingSigner, DecodeError> { unreachable!() }
+
+       fn sign_invoice(&self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient) -> Result<RecoverableSignature, ()> {
+               unreachable!()
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use lightning::util::logger::{Logger, Record};
+       use std::collections::HashMap;
+       use std::sync::Mutex;
+
+       struct TrackingLogger {
+               /// (module, message) -> count
+               pub lines: Mutex<HashMap<(String, String), usize>>,
+       }
+       impl Logger for TrackingLogger {
+               fn log(&self, record: &Record) {
+                       *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
+                       println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
+               }
+       }
+
+       #[test]
+       fn test_no_onion_message_breakage() {
+               let one_hop_om = "020000000000000000000000000000000000000000000000000000000000000e01055600020000000000000000000000000000000000000000000000000000000000000e011204109500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009300000000000000000000000000000000000000000000000000000000000000";
+               let logger = TrackingLogger { lines: Mutex::new(HashMap::new()) };
+               super::do_test(&::hex::decode(one_hop_om).unwrap(), &logger);
+               {
+                       let log_entries = logger.lines.lock().unwrap();
+                       assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(), "Received an onion message with path_id: None and no reply_path".to_string())), Some(&1));
+               }
+
+               let two_unblinded_hops_om = "020000000000000000000000000000000000000000000000000000000000000e01055600020000000000000000000000000000000000000000000000000000000000000e0135043304210200000000000000000000000000000000000000000000000000000000000000039500000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000001204105e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b300000000000000000000000000000000000000000000000000000000000000";
+               let logger = TrackingLogger { lines: Mutex::new(HashMap::new()) };
+               super::do_test(&::hex::decode(two_unblinded_hops_om).unwrap(), &logger);
+               {
+                       let log_entries = logger.lines.lock().unwrap();
+                       assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(), "Forwarding an onion message to peer 020000000000000000000000000000000000000000000000000000000000000003".to_string())), Some(&1));
+               }
+
+               let two_unblinded_two_blinded_om = "020000000000000000000000000000000000000000000000000000000000000e01055600020000000000000000000000000000000000000000000000000000000000000e01350433042102000000000000000000000000000000000000000000000000000000000000000395000000000000000000000000000000530000000000000000000000000000000000000000000000000000000000000058045604210200000000000000000000000000000000000000000000000000000000000000040821020000000000000000000000000000000000000000000000000000000000000e015e0000000000000000000000000000006b0000000000000000000000000000000000000000000000000000000000000035043304210200000000000000000000000000000000000000000000000000000000000000054b000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000120410ee0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b300000000000000000000000000000000000000000000000000000000000000";
+               let logger = TrackingLogger { lines: Mutex::new(HashMap::new()) };
+               super::do_test(&::hex::decode(two_unblinded_two_blinded_om).unwrap(), &logger);
+               {
+                       let log_entries = logger.lines.lock().unwrap();
+                       assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(), "Forwarding an onion message to peer 020000000000000000000000000000000000000000000000000000000000000003".to_string())), Some(&1));
+               }
+
+               let three_blinded_om = "020000000000000000000000000000000000000000000000000000000000000e01055600020000000000000000000000000000000000000000000000000000000000000e013504330421020000000000000000000000000000000000000000000000000000000000000003950000000000000000000000000000007f0000000000000000000000000000000000000000000000000000000000000035043304210200000000000000000000000000000000000000000000000000000000000000045e0000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000001204104a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b300000000000000000000000000000000000000000000000000000000000000";
+               let logger = TrackingLogger { lines: Mutex::new(HashMap::new()) };
+               super::do_test(&::hex::decode(three_blinded_om).unwrap(), &logger);
+               {
+                       let log_entries = logger.lines.lock().unwrap();
+                       assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(), "Forwarding an onion message to peer 020000000000000000000000000000000000000000000000000000000000000003".to_string())), Some(&1));
+               }
+       }
+}
index d649710526ad77f43e8c5dc969a6cdbdca7dd382..3c8e37175d302df47bb35e1991eb8095386d937e 100644 (file)
@@ -1,11 +1,12 @@
 // Imports that need to be added manually
 use lightning_rapid_gossip_sync::RapidGossipSync;
+use bitcoin::hashes::Hash as TraitImport;
 
 use utils::test_logger;
 
 /// Actual fuzz test, method signature and name are fixed
 fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
-       let block_hash = bitcoin::BlockHash::default();
+       let block_hash = bitcoin::BlockHash::all_zeros();
        let logger = test_logger::TestLogger::new("".to_owned(), out);
        let network_graph = lightning::routing::gossip::NetworkGraph::new(block_hash, &logger);
        let rapid_sync = RapidGossipSync::new(&network_graph);
index 7958a6f614d817ee6783e3d4a7c175912204d15c..cff3f9bdbb52dd87e5f2cfbd68525768e109963e 100644 (file)
@@ -2,6 +2,7 @@
 void chanmon_deser_run(const unsigned char* data, size_t data_len);
 void chanmon_consistency_run(const unsigned char* data, size_t data_len);
 void full_stack_run(const unsigned char* data, size_t data_len);
+void onion_message_run(const unsigned char* data, size_t data_len);
 void peer_crypt_run(const unsigned char* data, size_t data_len);
 void process_network_graph_run(const unsigned char* data, size_t data_len);
 void router_run(const unsigned char* data, size_t data_len);
index 2df83f2b75a1e2f646ffdf3abf62b60cac6304b3..ef07a3c9df5fe9dfa0dd9752da03c674d8c1271a 100644 (file)
@@ -14,7 +14,7 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-bitcoin = "0.28.1"
+bitcoin = "0.29.0"
 lightning = { version = "0.0.110", path = "../lightning", features = ["std"] }
 lightning-rapid-gossip-sync = { version = "0.0.110", path = "../lightning-rapid-gossip-sync" }
 
index 484439b3907b364dddc3dc2c13bb6c078be2ad0c..e95c9c3709edfe80cfce45d57bdd003a928725f7 100644 (file)
@@ -2,7 +2,10 @@
 //! running properly, and (2) either can or should be run in the background. See docs for
 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
 
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
 #![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
 #![deny(missing_docs)]
 #![deny(unsafe_code)]
 
@@ -16,7 +19,7 @@ use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
 use lightning::chain::keysinterface::{Sign, KeysInterface};
 use lightning::ln::channelmanager::ChannelManager;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::scoring::WriteableScore;
@@ -278,6 +281,7 @@ impl BackgroundProcessor {
                P: 'static + Deref + Send + Sync,
                Descriptor: 'static + SocketDescriptor + Send + Sync,
                CMH: 'static + Deref + Send + Sync,
+               OMH: 'static + Deref + Send + Sync,
                RMH: 'static + Deref + Send + Sync,
                EH: 'static + EventHandler + Send,
                PS: 'static + Deref + Send,
@@ -286,7 +290,7 @@ impl BackgroundProcessor {
                PGS: 'static + Deref<Target = P2PGossipSync<G, CA, L>> + Send + Sync,
                RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
                UMH: 'static + Deref + Send + Sync,
-               PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
+               PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH>> + Send + Sync,
                S: 'static + Deref<Target = SC> + Send + Sync,
                SC: WriteableScore<'a>,
        >(
@@ -303,6 +307,7 @@ impl BackgroundProcessor {
                L::Target: 'static + Logger,
                P::Target: 'static + Persist<Signer>,
                CMH::Target: 'static + ChannelMessageHandler,
+               OMH::Target: 'static + OnionMessageHandler,
                RMH::Target: 'static + RoutingMessageHandler,
                UMH::Target: 'static + CustomMessageHandler,
                PS::Target: 'static + Persister<'a, Signer, CW, T, K, F, L, SC>,
@@ -488,6 +493,7 @@ impl Drop for BackgroundProcessor {
 mod tests {
        use bitcoin::blockdata::block::BlockHeader;
        use bitcoin::blockdata::constants::genesis_block;
+       use bitcoin::blockdata::locktime::PackedLockTime;
        use bitcoin::blockdata::transaction::{Transaction, TxOut};
        use bitcoin::network::constants::Network;
        use lightning::chain::{BestBlock, Confirm, chainmonitor};
@@ -513,6 +519,8 @@ mod tests {
        use std::sync::{Arc, Mutex};
        use std::sync::mpsc::SyncSender;
        use std::time::Duration;
+       use bitcoin::hashes::Hash;
+       use bitcoin::TxMerkleNode;
        use lightning::routing::scoring::{FixedPenaltyScorer};
        use lightning_rapid_gossip_sync::RapidGossipSync;
        use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
@@ -538,7 +546,7 @@ mod tests {
                node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
                p2p_gossip_sync: PGS,
                rapid_gossip_sync: RGS,
-               peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>, IgnoringMessageHandler>>,
+               peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler>>,
                chain_monitor: Arc<ChainMonitor>,
                persister: Arc<FilesystemPersister>,
                tx_broadcaster: Arc<test_utils::TestBroadcaster>,
@@ -657,7 +665,7 @@ mod tests {
                        let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash(), logger.clone()));
                        let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
                        let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone()));
-                       let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
+                       let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
                        let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler{}));
                        let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0)));
                        let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
@@ -700,7 +708,7 @@ mod tests {
                                        assert_eq!(channel_value_satoshis, $channel_value);
                                        assert_eq!(user_channel_id, 42);
 
-                                       let tx = Transaction { version: 1 as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+                                       let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
                                                value: channel_value_satoshis, script_pubkey: output_script.clone(),
                                        }]};
                                        (temporary_channel_id, tx)
@@ -722,7 +730,7 @@ mod tests {
                for i in 1..=depth {
                        let prev_blockhash = node.best_block.block_hash();
                        let height = node.best_block.height() + 1;
-                       let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height, bits: 42, nonce: 42 };
+                       let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
                        let txdata = vec![(0, tx)];
                        node.best_block = BestBlock::new(header.block_hash(), height);
                        match i {
index c6650208e26023f7c693bb5aa723439dacb91cb3..27fa32149f1c4cadb3864f9214767778ebd75832 100644 (file)
@@ -18,7 +18,7 @@ rest-client = [ "serde", "serde_json", "chunked_transfer" ]
 rpc-client = [ "serde", "serde_json", "chunked_transfer" ]
 
 [dependencies]
-bitcoin = "0.28.1"
+bitcoin = "0.29.0"
 lightning = { version = "0.0.110", path = "../lightning" }
 futures = { version = "0.3" }
 tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
index 8023c83751920647caecb9ecb67288cc0cfc6506..ed28833b7b30d4986fe53e2801d2ddaf43dcf765 100644 (file)
@@ -15,6 +15,7 @@ use serde_json;
 use std::convert::From;
 use std::convert::TryFrom;
 use std::convert::TryInto;
+use bitcoin::hashes::Hash;
 
 /// Conversion from `std::io::Error` into `BlockSourceError`.
 impl From<std::io::Error> for BlockSourceError {
@@ -57,7 +58,7 @@ impl TryInto<BlockHeaderData> for JsonResponse {
 
                // Add an empty previousblockhash for the genesis block.
                if let None = header.get("previousblockhash") {
-                       let hash: BlockHash = Default::default();
+                       let hash: BlockHash = BlockHash::all_zeros();
                        header.as_object_mut().unwrap().insert("previousblockhash".to_string(), serde_json::json!(hash.to_hex()));
                }
 
index 321dd57e4713a638c86ba4fc033a89473d0129af..823cb5eb554e2e9a0525004f4c84550adb9ab2a7 100644 (file)
 //! Both features support either blocking I/O using `std::net::TcpStream` or, with feature `tokio`,
 //! non-blocking I/O using `tokio::net::TcpStream` from inside a Tokio runtime.
 
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
 #![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
 #![deny(missing_docs)]
 #![deny(unsafe_code)]
 
index baaab456b5adeb8fb25df64d6d306c011a0062dd..0c402deb3294663527afaab0011cea1f627569ef 100644 (file)
@@ -7,7 +7,7 @@ use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
 use bitcoin::util::uint::Uint256;
 use bitcoin::util::hash::bitcoin_merkle_root;
-use bitcoin::Transaction;
+use bitcoin::{PackedLockTime, Transaction};
 
 use lightning::chain;
 
@@ -45,7 +45,7 @@ impl Blockchain {
                        // but that's OK because those tests don't trigger the check.
                        let coinbase = Transaction {
                                version: 0,
-                               lock_time: 0,
+                               lock_time: PackedLockTime::ZERO,
                                input: vec![],
                                output: vec![]
                        };
index cc0e68cfc422b1bd098b2b985509b6b8cd1bb710..cd67bcd36dc486d911b84a2842116ce5e1b2dd16 100644 (file)
@@ -19,11 +19,11 @@ no-std = ["hashbrown", "lightning/no-std", "core2/alloc"]
 std = ["bitcoin_hashes/std", "num-traits/std", "lightning/std", "bech32/std"]
 
 [dependencies]
-bech32 = { version = "0.8", default-features = false }
+bech32 = { version = "0.9.0", default-features = false }
 lightning = { version = "0.0.110", path = "../lightning", default-features = false }
-secp256k1 = { version = "0.22", default-features = false, features = ["recovery", "alloc"] }
+secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"] }
 num-traits = { version = "0.2.8", default-features = false }
-bitcoin_hashes = { version = "0.10", default-features = false }
+bitcoin_hashes = { version = "0.11", default-features = false }
 hashbrown = { version = "0.11", optional = true }
 core2 = { version = "0.3.0", default-features = false, optional = true }
 serde = { version = "1.0.118", optional = true }
index d741864aea2ec8d55a824f725d8e875e9a9d6c0c..6f79757c22bb810d574a0438003b70ee76444e6b 100644 (file)
@@ -12,11 +12,11 @@ afl_fuzz = ["afl"]
 honggfuzz_fuzz = ["honggfuzz"]
 
 [dependencies]
-honggfuzz = { version = "0.5", optional = true }
+honggfuzz = { version = "0.5", optional = true, default-features = false }
 afl = { version = "0.4", optional = true }
 lightning-invoice = { path = ".." }
 lightning = { path = "../../lightning", features = ["regex"] }
-bech32 = "0.8"
+bech32 = "0.9.0"
 
 # Prevent this from interfering with workspaces
 [workspace]
index ae85ea91301283a5a6765949f361deaba9986304..db1b9eb388c5231d58dbb0e9d03e338f40b2c8e6 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/bash
 set -e
-cargo install --force honggfuzz
+cargo install --force honggfuzz --no-default-features
 for TARGET in fuzz_targets/*; do
     FILENAME=$(basename $TARGET)
        FILE="${FILENAME%.*}"
index bad024c66c5a37114d144c146014294e0f747501..4fc58271ed722f17d3b0392a7612915cbcc45a97 100644 (file)
@@ -1,9 +1,12 @@
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
+#![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
 #![deny(missing_docs)]
 #![deny(non_upper_case_globals)]
 #![deny(non_camel_case_types)]
 #![deny(non_snake_case)]
 #![deny(unused_mut)]
-#![deny(broken_intra_doc_links)]
 
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 
@@ -236,7 +239,7 @@ pub struct InvoiceBuilder<D: tb::Bool, H: tb::Bool, T: tb::Bool, C: tb::Bool, S:
 ///  1. using `InvoiceBuilder`
 ///  2. using `Invoice::from_signed(SignedRawInvoice)`
 ///  3. using `str::parse::<Invoice>(&str)`
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
 pub struct Invoice {
        signed_invoice: SignedRawInvoice,
 }
@@ -260,7 +263,7 @@ pub enum InvoiceDescription<'f> {
 ///
 /// # Invariants
 /// The hash has to be either from the deserialized invoice or from the serialized `raw_invoice`.
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
 pub struct SignedRawInvoice {
        /// The rawInvoice that the signature belongs to
        raw_invoice: RawInvoice,
@@ -283,7 +286,7 @@ pub struct SignedRawInvoice {
 /// De- and encoding should not lead to information loss but may lead to different hashes.
 ///
 /// For methods without docs see the corresponding methods in `Invoice`.
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
 pub struct RawInvoice {
        /// human readable part
        pub hrp: RawHrp,
@@ -295,7 +298,7 @@ pub struct RawInvoice {
 /// Data of the `RawInvoice` that is encoded in the human readable part
 ///
 /// (C-not exported) As we don't yet support Option<Enum>
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
 pub struct RawHrp {
	/// The currency derived from the 3rd and 4th character of the bech32 transaction
        pub currency: Currency,
@@ -308,7 +311,7 @@ pub struct RawHrp {
 }
 
 /// Data of the `RawInvoice` that is encoded in the data part
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
 pub struct RawDataPart {
        /// generation time of the invoice
        pub timestamp: PositiveTimestamp,
@@ -323,11 +326,11 @@ pub struct RawDataPart {
 ///
 /// The Unix timestamp representing the stored time has to be positive and no greater than
 /// [`MAX_TIMESTAMP`].
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
 pub struct PositiveTimestamp(Duration);
 
 /// SI prefixes for the human readable part
-#[derive(Eq, PartialEq, Debug, Clone, Copy)]
+#[derive(Eq, PartialEq, Debug, Clone, Copy, Hash)]
 pub enum SiPrefix {
        /// 10^-3
        Milli,
@@ -453,7 +456,7 @@ pub enum Fallback {
 }
 
 /// Recoverable signature
-#[derive(Clone, Debug, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq)]
 pub struct InvoiceSignature(pub RecoverableSignature);
 
 /// Private routing information
index 051893ea0a3a5c9ce0405045e828644928f5c8d0..e7cc43508d933bb795d02d7b8e50bdd047dd8320 100644 (file)
@@ -77,8 +77,8 @@
 //! # }
 //! #
 //! # struct FakeRouter {}
-//! # impl<S: Score> Router<S> for FakeRouter {
-//! #     fn find_route(
+//! # impl Router for FakeRouter {
+//! #     fn find_route<S: Score>(
 //! #         &self, payer: &PublicKey, params: &RouteParameters, payment_hash: &PaymentHash,
 //! #         first_hops: Option<&[&ChannelDetails]>, scorer: &S
 //! #     ) -> Result<Route, LightningError> { unimplemented!() }
@@ -144,8 +144,10 @@ use crate::prelude::*;
 use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::channelmanager::{ChannelDetails, PaymentId, PaymentSendFailure};
 use lightning::ln::msgs::LightningError;
-use lightning::routing::scoring::{LockableScore, Score};
-use lightning::routing::router::{PaymentParameters, Route, RouteParameters};
+use lightning::routing::gossip::NodeId;
+use lightning::routing::scoring::{ChannelUsage, LockableScore, Score};
+use lightning::routing::router::{PaymentParameters, Route, RouteHop, RouteParameters};
+use lightning::util::errors::APIError;
 use lightning::util::events::{Event, EventHandler};
 use lightning::util::logger::Logger;
 use time_utils::Time;
@@ -175,10 +177,9 @@ use time_utils;
 type ConfiguredTime = time_utils::Eternity;
 
 /// (C-not exported) generally all users should use the [`InvoicePayer`] type alias.
-pub struct InvoicePayerUsingTime<P: Deref, R, S: Deref, L: Deref, E: EventHandler, T: Time>
+pub struct InvoicePayerUsingTime<P: Deref, R: Router, S: Deref, L: Deref, E: EventHandler, T: Time>
 where
        P::Target: Payer,
-       R: for <'a> Router<<<S as Deref>::Target as LockableScore<'a>>::Locked>,
        S::Target: for <'a> LockableScore<'a>,
        L::Target: Logger,
 {
@@ -188,10 +189,69 @@ where
        logger: L,
        event_handler: E,
        /// Caches the overall attempts at making a payment, which is updated prior to retrying.
-       payment_cache: Mutex<HashMap<PaymentHash, PaymentAttempts<T>>>,
+       payment_cache: Mutex<HashMap<PaymentHash, PaymentInfo<T>>>,
        retry: Retry,
 }
 
+/// Used by [`InvoicePayerUsingTime::payment_cache`] to track the payments that are either
+/// currently being made, or have outstanding paths that need retrying.
+struct PaymentInfo<T: Time> {
+       attempts: PaymentAttempts<T>,
+       paths: Vec<Vec<RouteHop>>,
+}
+
+impl<T: Time> PaymentInfo<T> {
+       fn new() -> Self {
+               PaymentInfo {
+                       attempts: PaymentAttempts::new(),
+                       paths: vec![],
+               }
+       }
+}
+
+/// Used to store information about all the HTLCs that are inflight across all payment attempts
+struct AccountForInFlightHtlcs<'a, S: Score> {
+       scorer: &'a mut S,
+       /// Maps a channel's short channel id and its direction to the liquidity used up.
+       inflight_htlcs: HashMap<(u64, bool), u64>,
+}
+
+#[cfg(c_bindings)]
+impl<'a, S:Score> lightning::util::ser::Writeable for AccountForInFlightHtlcs<'a, S> {
+       fn write<W: lightning::util::ser::Writer>(&self, writer: &mut W) -> Result<(), std::io::Error> { self.scorer.write(writer) }
+}
+
+impl<'a, S: Score> Score for AccountForInFlightHtlcs<'a, S> {
+       fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage) -> u64 {
+               if let Some(used_liqudity) = self.inflight_htlcs.get(&(short_channel_id, source < target)) {
+                       let usage = ChannelUsage {
+                               inflight_htlc_msat: usage.inflight_htlc_msat + used_liqudity,
+                               ..usage
+                       };
+
+                       self.scorer.channel_penalty_msat(short_channel_id, source, target, usage)
+               } else {
+                       self.scorer.channel_penalty_msat(short_channel_id, source, target, usage)
+               }
+       }
+
+       fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
+               self.scorer.payment_path_failed(path, short_channel_id)
+       }
+
+       fn payment_path_successful(&mut self, path: &[&RouteHop]) {
+               self.scorer.payment_path_successful(path)
+       }
+
+       fn probe_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
+               self.scorer.probe_failed(path, short_channel_id)
+       }
+
+       fn probe_successful(&mut self, path: &[&RouteHop]) {
+               self.scorer.probe_successful(path)
+       }
+}
+
 /// Storing minimal payment attempts information required for determining if an outbound payment can
 /// be retried.
 #[derive(Clone, Copy)]
@@ -252,9 +312,9 @@ pub trait Payer {
 }
 
 /// A trait defining behavior for routing an [`Invoice`] payment.
-pub trait Router<S: Score> {
+pub trait Router {
        /// Finds a [`Route`] between `payer` and `payee` for a payment with the given values.
-       fn find_route(
+       fn find_route<S: Score>(
                &self, payer: &PublicKey, route_params: &RouteParameters, payment_hash: &PaymentHash,
                first_hops: Option<&[&ChannelDetails]>, scorer: &S
        ) -> Result<Route, LightningError>;
@@ -299,10 +359,9 @@ pub enum PaymentError {
        Sending(PaymentSendFailure),
 }
 
-impl<P: Deref, R, S: Deref, L: Deref, E: EventHandler, T: Time> InvoicePayerUsingTime<P, R, S, L, E, T>
+impl<P: Deref, R: Router, S: Deref, L: Deref, E: EventHandler, T: Time> InvoicePayerUsingTime<P, R, S, L, E, T>
 where
        P::Target: Payer,
-       R: for <'a> Router<<<S as Deref>::Target as LockableScore<'a>>::Locked>,
        S::Target: for <'a> LockableScore<'a>,
        L::Target: Logger,
 {
@@ -361,7 +420,7 @@ where
                let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner());
                match self.payment_cache.lock().unwrap().entry(payment_hash) {
                        hash_map::Entry::Occupied(_) => return Err(PaymentError::Invoice("payment pending")),
-                       hash_map::Entry::Vacant(entry) => entry.insert(PaymentAttempts::new()),
+                       hash_map::Entry::Vacant(entry) => entry.insert(PaymentInfo::new()),
                };
 
                let payment_secret = Some(invoice.payment_secret().clone());
@@ -397,7 +456,7 @@ where
                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
                match self.payment_cache.lock().unwrap().entry(payment_hash) {
                        hash_map::Entry::Occupied(_) => return Err(PaymentError::Invoice("payment pending")),
-                       hash_map::Entry::Vacant(entry) => entry.insert(PaymentAttempts::new()),
+                       hash_map::Entry::Vacant(entry) => entry.insert(PaymentInfo::new()),
                };
 
                let route_params = RouteParameters {
@@ -425,28 +484,46 @@ where
 
                let payer = self.payer.node_id();
                let first_hops = self.payer.first_hops();
+               let inflight_htlcs = self.create_inflight_map();
                let route = self.router.find_route(
-                       &payer, params, &payment_hash, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       &self.scorer.lock()
+                       &payer, &params, &payment_hash, Some(&first_hops.iter().collect::<Vec<_>>()),
+                       &AccountForInFlightHtlcs { scorer: &mut self.scorer.lock(), inflight_htlcs }
                ).map_err(|e| PaymentError::Routing(e))?;
 
                match send_payment(&route) {
-                       Ok(payment_id) => Ok(payment_id),
+                       Ok(payment_id) => {
+                               for path in route.paths {
+                                       self.process_path_inflight_htlcs(payment_hash, path);
+                               }
+                               Ok(payment_id)
+                       },
                        Err(e) => match e {
                                PaymentSendFailure::ParameterError(_) => Err(e),
                                PaymentSendFailure::PathParameterError(_) => Err(e),
                                PaymentSendFailure::AllFailedRetrySafe(_) => {
                                        let mut payment_cache = self.payment_cache.lock().unwrap();
-                                       let payment_attempts = payment_cache.get_mut(&payment_hash).unwrap();
-                                       payment_attempts.count += 1;
-                                       if self.retry.is_retryable_now(payment_attempts) {
+                                       let payment_info = payment_cache.get_mut(&payment_hash).unwrap();
+                                       payment_info.attempts.count += 1;
+                                       if self.retry.is_retryable_now(&payment_info.attempts) {
                                                core::mem::drop(payment_cache);
                                                Ok(self.pay_internal(params, payment_hash, send_payment)?)
                                        } else {
                                                Err(e)
                                        }
                                },
-                               PaymentSendFailure::PartialFailure { failed_paths_retry, payment_id, .. } => {
+                               PaymentSendFailure::PartialFailure { failed_paths_retry, payment_id, results } => {
+                                       // If a `PartialFailure` event returns a result that is an `Ok()`, it means that
+                                       // part of our payment is retried. When we receive `MonitorUpdateFailed`, it
+                                       // means that we are still waiting for our channel monitor update to be completed.
+                                       for (result, path) in results.iter().zip(route.paths.into_iter()) {
+                                               match result {
+                                                       Ok(_) | Err(APIError::MonitorUpdateFailed) => {
+                                                               self.process_path_inflight_htlcs(payment_hash, path);
+                                                       },
+                                                       _ => {},
+                                               }
+                                       }
+
                                        if let Some(retry_data) = failed_paths_retry {
                                                // Some paths were sent, even if we failed to send the full MPP value our
                                                // recipient may misbehave and claim the funds, at which point we have to
@@ -466,16 +543,36 @@ where
                }.map_err(|e| PaymentError::Sending(e))
        }
 
+       // Takes in a path to have its information stored in `payment_cache`. This is done for paths
+       // that are pending retry.
+       fn process_path_inflight_htlcs(&self, payment_hash: PaymentHash, path: Vec<RouteHop>) {
+               self.payment_cache.lock().unwrap().entry(payment_hash)
+                       .or_insert_with(|| PaymentInfo::new())
+                       .paths.push(path);
+       }
+
+       // Find the path we want to remove in `payment_cache`. If it doesn't exist, do nothing.
+       fn remove_path_inflight_htlcs(&self, payment_hash: PaymentHash, path: &Vec<RouteHop>) {
+               self.payment_cache.lock().unwrap().entry(payment_hash)
+                       .and_modify(|payment_info| {
+                               if let Some(idx) = payment_info.paths.iter().position(|p| p == path) {
+                                       payment_info.paths.swap_remove(idx);
+                               }
+                       });
+       }
+
        fn retry_payment(
                &self, payment_id: PaymentId, payment_hash: PaymentHash, params: &RouteParameters
        ) -> Result<(), ()> {
-               let attempts =
-                       *self.payment_cache.lock().unwrap().entry(payment_hash)
-                       .and_modify(|attempts| attempts.count += 1)
-                       .or_insert(PaymentAttempts {
-                               count: 1,
-                               first_attempted_at: T::now()
-                       });
+               let attempts = self.payment_cache.lock().unwrap().entry(payment_hash)
+                       .and_modify(|info| info.attempts.count += 1 )
+                       .or_insert_with(|| PaymentInfo {
+                               attempts: PaymentAttempts {
+                                       count: 1,
+                                       first_attempted_at: T::now(),
+                               },
+                               paths: vec![],
+                       }).attempts;
 
                if !self.retry.is_retryable_now(&attempts) {
                        log_trace!(self.logger, "Payment {} exceeded maximum attempts; not retrying ({})", log_bytes!(payment_hash.0), attempts);
@@ -491,17 +588,25 @@ where
 
                let payer = self.payer.node_id();
                let first_hops = self.payer.first_hops();
+               let inflight_htlcs = self.create_inflight_map();
+
                let route = self.router.find_route(
                        &payer, &params, &payment_hash, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       &self.scorer.lock()
+                       &AccountForInFlightHtlcs { scorer: &mut self.scorer.lock(), inflight_htlcs }
                );
+
                if route.is_err() {
                        log_trace!(self.logger, "Failed to find a route for payment {}; not retrying ({:})", log_bytes!(payment_hash.0), attempts);
                        return Err(());
                }
 
-               match self.payer.retry_payment(&route.unwrap(), payment_id) {
-                       Ok(()) => Ok(()),
+               match self.payer.retry_payment(&route.as_ref().unwrap(), payment_id) {
+                       Ok(()) => {
+                               for path in route.unwrap().paths.into_iter() {
+                                       self.process_path_inflight_htlcs(payment_hash, path);
+                               }
+                               Ok(())
+                       },
                        Err(PaymentSendFailure::ParameterError(_)) |
                        Err(PaymentSendFailure::PathParameterError(_)) => {
                                log_trace!(self.logger, "Failed to retry for payment {} due to bogus route/payment data, not retrying.", log_bytes!(payment_hash.0));
@@ -510,7 +615,19 @@ where
                        Err(PaymentSendFailure::AllFailedRetrySafe(_)) => {
                                self.retry_payment(payment_id, payment_hash, params)
                        },
-                       Err(PaymentSendFailure::PartialFailure { failed_paths_retry, .. }) => {
+                       Err(PaymentSendFailure::PartialFailure { failed_paths_retry, results, .. }) => {
+                               // If a `PartialFailure` error contains a result that is an `Ok()`, it means that
+                               // part of our payment is retried. When we receive `MonitorUpdateFailed`, it
+                               // means that we are still waiting for our channel monitor update to complete.
+                               for (result, path) in results.iter().zip(route.unwrap().paths.into_iter()) {
+                                       match result {
+                                               Ok(_) | Err(APIError::MonitorUpdateFailed) => {
+                                                       self.process_path_inflight_htlcs(payment_hash, path);
+                                               },
+                                               _ => {},
+                                       }
+                               }
+
                                if let Some(retry) = failed_paths_retry {
                                        // Always return Ok for the same reason as noted in pay_internal.
                                        let _ = self.retry_payment(payment_id, payment_hash, &retry);
@@ -527,6 +644,47 @@ where
        pub fn remove_cached_payment(&self, payment_hash: &PaymentHash) {
                self.payment_cache.lock().unwrap().remove(payment_hash);
        }
+
+       /// Given a [`PaymentHash`], this function looks up inflight path attempts in the payment_cache.
+       /// Then, it uses the path information inside the cache to construct a HashMap mapping a channel's
+       /// short channel id and direction to the amount being sent through it.
+       ///
+       /// This function should be called whenever we need information about currently used up liquidity
+       /// across payments.
+       fn create_inflight_map(&self) -> HashMap<(u64, bool), u64> {
+               let mut total_inflight_map: HashMap<(u64, bool), u64> = HashMap::new();
+               // Make an attempt at finding existing payment information from `payment_cache`. If it
+               // does not exist, it probably is a fresh payment and we can just return an empty
+               // HashMap.
+               for payment_info in self.payment_cache.lock().unwrap().values() {
+                       for path in &payment_info.paths {
+                               if path.is_empty() { break };
+                               // total_inflight_map needs to be direction-sensitive when keeping track of the HTLC value
+                               // that is held up. However, the `hops` array, which is a path returned by `find_route` in
+                               // the router excludes the payer node. In the following lines, the payer's information is
+                               // hardcoded with an inflight value of 0 so that we can correctly represent the first hop
+                               // in our sliding window of two.
+                               let our_node_id: PublicKey = self.payer.node_id();
+                               let reversed_hops_with_payer = path.iter().rev().skip(1)
+                                       .map(|hop| hop.pubkey)
+                                       .chain(core::iter::once(our_node_id));
+                               let mut cumulative_msat = 0;
+
+                               // Taking the reversed vector from above, we zip it with just the reversed hops list to
+                               // work "backwards" of the given path, since the last hop's `fee_msat` actually represents
+                               // the total amount sent.
+                               for (next_hop, prev_hop) in path.iter().rev().zip(reversed_hops_with_payer) {
+                                       cumulative_msat += next_hop.fee_msat;
+                                       total_inflight_map
+                                               .entry((next_hop.short_channel_id, NodeId::from_pubkey(&prev_hop) < NodeId::from_pubkey(&next_hop.pubkey)))
+                                               .and_modify(|used_liquidity_msat| *used_liquidity_msat += cumulative_msat)
+                                               .or_insert(cumulative_msat);
+                               }
+                       }
+               }
+
+               total_inflight_map
+       }
 }
 
 fn expiry_time_from_unix_epoch(invoice: &Invoice) -> Duration {
@@ -540,14 +698,23 @@ fn has_expired(route_params: &RouteParameters) -> bool {
        } else { false }
 }
 
-impl<P: Deref, R, S: Deref, L: Deref, E: EventHandler, T: Time> EventHandler for InvoicePayerUsingTime<P, R, S, L, E, T>
+impl<P: Deref, R: Router, S: Deref, L: Deref, E: EventHandler, T: Time> EventHandler for InvoicePayerUsingTime<P, R, S, L, E, T>
 where
        P::Target: Payer,
-       R: for <'a> Router<<<S as Deref>::Target as LockableScore<'a>>::Locked>,
        S::Target: for <'a> LockableScore<'a>,
        L::Target: Logger,
 {
        fn handle_event(&self, event: &Event) {
+               match event {
+                       Event::PaymentPathFailed { payment_hash, path, ..  }
+                       | Event::PaymentPathSuccessful { path, payment_hash: Some(payment_hash), .. }
+                       | Event::ProbeSuccessful { payment_hash, path, .. }
+                       | Event::ProbeFailed { payment_hash, path, .. } => {
+                               self.remove_path_inflight_htlcs(*payment_hash, path);
+                       },
+                       _ => {},
+               }
+
                match event {
                        Event::PaymentPathFailed {
                                payment_id, payment_hash, rejected_by_dest, path, short_channel_id, retry, ..
@@ -583,7 +750,7 @@ where
                                let mut payment_cache = self.payment_cache.lock().unwrap();
                                let attempts = payment_cache
                                        .remove(payment_hash)
-                                       .map_or(1, |attempts| attempts.count + 1);
+                                       .map_or(1, |payment_info| payment_info.attempts.count + 1);
                                log_trace!(self.logger, "Payment {} succeeded (attempts: {})", log_bytes!(payment_hash.0), attempts);
                        },
                        Event::ProbeSuccessful { payment_hash, path, .. } => {
@@ -616,7 +783,7 @@ mod tests {
        use lightning::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures};
        use lightning::ln::functional_test_utils::*;
        use lightning::ln::msgs::{ChannelMessageHandler, ErrorAction, LightningError};
-       use lightning::routing::gossip::NodeId;
+       use lightning::routing::gossip::{EffectiveCapacity, NodeId};
        use lightning::routing::router::{PaymentParameters, Route, RouteHop};
        use lightning::routing::scoring::ChannelUsage;
        use lightning::util::test_utils::TestLogger;
@@ -628,6 +795,7 @@ mod tests {
        use std::time::{SystemTime, Duration};
        use time_utils::tests::SinceEpoch;
        use DEFAULT_EXPIRY_TIME;
+       use lightning::util::errors::APIError::{ChannelUnavailable, MonitorUpdateFailed};
 
        fn invoice(payment_preimage: PaymentPreimage) -> Invoice {
                let payment_hash = Sha256::hash(&payment_preimage.0);
@@ -775,8 +943,8 @@ mod tests {
                let final_value_msat = invoice.amount_milli_satoshis().unwrap();
 
                let payer = TestPayer::new()
-                       .fails_with_partial_failure(retry.clone(), OnAttempt(1))
-                       .fails_with_partial_failure(retry, OnAttempt(2))
+                       .fails_with_partial_failure(retry.clone(), OnAttempt(1), None)
+                       .fails_with_partial_failure(retry, OnAttempt(2), None)
                        .expect_send(Amount::ForInvoice(final_value_msat))
                        .expect_send(Amount::OnRetry(final_value_msat / 2))
                        .expect_send(Amount::OnRetry(final_value_msat / 2));
@@ -1364,24 +1532,290 @@ mod tests {
                invoice_payer.handle_event(&event);
        }
 
+       #[test]
+       fn generates_correct_inflight_map_data() {
+               let event_handled = core::cell::RefCell::new(false);
+               let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+               let payment_preimage = PaymentPreimage([1; 32]);
+               let invoice = invoice(payment_preimage);
+               let payment_hash = Some(PaymentHash(invoice.payment_hash().clone().into_inner()));
+               let final_value_msat = invoice.amount_milli_satoshis().unwrap();
+
+               let payer = TestPayer::new().expect_send(Amount::ForInvoice(final_value_msat));
+               let final_value_msat = invoice.amount_milli_satoshis().unwrap();
+               let route = TestRouter::route_for_value(final_value_msat);
+               let router = TestRouter {};
+               let scorer = RefCell::new(TestScorer::new());
+               let logger = TestLogger::new();
+               let invoice_payer =
+                       InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(0));
+
+               let payment_id = invoice_payer.pay_invoice(&invoice).unwrap();
+
+               let inflight_map = invoice_payer.create_inflight_map();
+               // First path check
+               assert_eq!(inflight_map.get(&(0, false)).unwrap().clone(), 94);
+               assert_eq!(inflight_map.get(&(1, true)).unwrap().clone(), 84);
+               assert_eq!(inflight_map.get(&(2, false)).unwrap().clone(), 64);
+
+               // Second path check
+               assert_eq!(inflight_map.get(&(3, false)).unwrap().clone(), 74);
+               assert_eq!(inflight_map.get(&(4, false)).unwrap().clone(), 64);
+
+               invoice_payer.handle_event(&Event::PaymentPathSuccessful {
+                       payment_id, payment_hash, path: route.paths[0].clone()
+               });
+
+               let inflight_map = invoice_payer.create_inflight_map();
+
+               assert_eq!(inflight_map.get(&(0, false)), None);
+               assert_eq!(inflight_map.get(&(1, true)), None);
+               assert_eq!(inflight_map.get(&(2, false)), None);
+
+               // Second path should still be inflight
+               assert_eq!(inflight_map.get(&(3, false)).unwrap().clone(), 74);
+               assert_eq!(inflight_map.get(&(4, false)).unwrap().clone(), 64)
+       }
+
+       #[test]
+       fn considers_inflight_htlcs_between_invoice_payments_when_path_succeeds() {
+               // First, let's just send a payment through, but only make sure one of the path completes
+               let event_handled = core::cell::RefCell::new(false);
+               let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+               let payment_preimage = PaymentPreimage([1; 32]);
+               let payment_invoice = invoice(payment_preimage);
+               let payment_hash = Some(PaymentHash(payment_invoice.payment_hash().clone().into_inner()));
+               let final_value_msat = payment_invoice.amount_milli_satoshis().unwrap();
+
+               let payer = TestPayer::new()
+                       .expect_send(Amount::ForInvoice(final_value_msat))
+                       .expect_send(Amount::ForInvoice(final_value_msat));
+               let final_value_msat = payment_invoice.amount_milli_satoshis().unwrap();
+               let route = TestRouter::route_for_value(final_value_msat);
+               let router = TestRouter {};
+               let scorer = RefCell::new(TestScorer::new()
+                       // 1st invoice, 1st path
+                       .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 84, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 94, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       // 1st invoice, 2nd path
+                       .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 74, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       // 2nd invoice, 1st path
+                       .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 84, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 94, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       // 2nd invoice, 2nd path
+                       .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 64, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 74, inflight_htlc_msat: 74, effective_capacity: EffectiveCapacity::Unknown } )
+               );
+               let logger = TestLogger::new();
+               let invoice_payer =
+                       InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(0));
+
+               // Succeed 1st path, leave 2nd path inflight
+               let payment_id = invoice_payer.pay_invoice(&payment_invoice).unwrap();
+               invoice_payer.handle_event(&Event::PaymentPathSuccessful {
+                       payment_id, payment_hash, path: route.paths[0].clone()
+               });
+
+               // Let's pay a second invoice that will be using the same path. This should trigger the
+               // assertions that expect the last 4 ChannelUsage values above where TestScorer is initialized.
+               // Particularly, the 2nd path of the 1st payment, since it is not yet complete, should still
+               // have 64 msats inflight for paths considering the channel with scid of 1.
+               let payment_preimage_2 = PaymentPreimage([2; 32]);
+               let payment_invoice_2 = invoice(payment_preimage_2);
+               invoice_payer.pay_invoice(&payment_invoice_2).unwrap();
+       }
+
+       #[test]
+       fn considers_inflight_htlcs_between_retries() {
+               // First, let's just send a payment through, but only make sure one of the path completes
+               let event_handled = core::cell::RefCell::new(false);
+               let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+               let payment_preimage = PaymentPreimage([1; 32]);
+               let payment_invoice = invoice(payment_preimage);
+               let payment_hash = PaymentHash(payment_invoice.payment_hash().clone().into_inner());
+               let final_value_msat = payment_invoice.amount_milli_satoshis().unwrap();
+
+               let payer = TestPayer::new()
+                       .expect_send(Amount::ForInvoice(final_value_msat))
+                       .expect_send(Amount::OnRetry(final_value_msat / 2))
+                       .expect_send(Amount::OnRetry(final_value_msat / 4));
+               let final_value_msat = payment_invoice.amount_milli_satoshis().unwrap();
+               let router = TestRouter {};
+               let scorer = RefCell::new(TestScorer::new()
+                       // 1st invoice, 1st path
+                       .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 84, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 94, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       // 1st invoice, 2nd path
+                       .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 74, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       // Retry 1, 1st path
+                       .expect_usage(ChannelUsage { amount_msat: 32, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 52, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 62, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       // Retry 1, 2nd path
+                       .expect_usage(ChannelUsage { amount_msat: 32, inflight_htlc_msat: 64, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 42, inflight_htlc_msat: 64 + 10, effective_capacity: EffectiveCapacity::Unknown } )
+                       // Retry 2, 1st path
+                       .expect_usage(ChannelUsage { amount_msat: 16, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 36, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 46, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+                       // Retry 2, 2nd path
+                       .expect_usage(ChannelUsage { amount_msat: 16, inflight_htlc_msat: 64 + 32, effective_capacity: EffectiveCapacity::Unknown } )
+                       .expect_usage(ChannelUsage { amount_msat: 26, inflight_htlc_msat: 74 + 32 + 10, effective_capacity: EffectiveCapacity::Unknown } )
+               );
+               let logger = TestLogger::new();
+               let invoice_payer =
+                       InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(2));
+
+               // Fail 1st path, leave 2nd path inflight
+               let payment_id = Some(invoice_payer.pay_invoice(&payment_invoice).unwrap());
+               invoice_payer.handle_event(&Event::PaymentPathFailed {
+                       payment_id,
+                       payment_hash,
+                       network_update: None,
+                       rejected_by_dest: false,
+                       all_paths_failed: false,
+                       path: TestRouter::path_for_value(final_value_msat),
+                       short_channel_id: None,
+                       retry: Some(TestRouter::retry_for_invoice(&payment_invoice)),
+               });
+
+               // Fails again the 1st path of our retry
+               invoice_payer.handle_event(&Event::PaymentPathFailed {
+                       payment_id,
+                       payment_hash,
+                       network_update: None,
+                       rejected_by_dest: false,
+                       all_paths_failed: false,
+                       path: TestRouter::path_for_value(final_value_msat / 2),
+                       short_channel_id: None,
+                       retry: Some(RouteParameters {
+                               final_value_msat: final_value_msat / 4,
+                               ..TestRouter::retry_for_invoice(&payment_invoice)
+                       }),
+               });
+       }
+
+       #[test]
+       fn accounts_for_some_inflight_htlcs_sent_during_partial_failure() {
+               let event_handled = core::cell::RefCell::new(false);
+               let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+               let payment_preimage = PaymentPreimage([1; 32]);
+               let invoice_to_pay = invoice(payment_preimage);
+               let final_value_msat = invoice_to_pay.amount_milli_satoshis().unwrap();
+
+               let retry = TestRouter::retry_for_invoice(&invoice_to_pay);
+               let payer = TestPayer::new()
+                       .fails_with_partial_failure(
+                               retry.clone(), OnAttempt(1),
+                               Some(vec![
+                                       Err(ChannelUnavailable { err: "abc".to_string() }), Err(MonitorUpdateFailed)
+                               ]))
+                       .expect_send(Amount::ForInvoice(final_value_msat));
+
+               let router = TestRouter {};
+               let scorer = RefCell::new(TestScorer::new());
+               let logger = TestLogger::new();
+               let invoice_payer =
+                       InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(0));
+
+               invoice_payer.pay_invoice(&invoice_to_pay).unwrap();
+               let inflight_map = invoice_payer.create_inflight_map();
+
+               // Only the second path, which failed with `MonitorUpdateFailed`, should be added to our
+               // inflight map because retries are disabled.
+               assert_eq!(inflight_map.len(), 2);
+       }
+
+       #[test]
+       fn accounts_for_all_inflight_htlcs_sent_during_partial_failure() {
+               let event_handled = core::cell::RefCell::new(false);
+               let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+               let payment_preimage = PaymentPreimage([1; 32]);
+               let invoice_to_pay = invoice(payment_preimage);
+               let final_value_msat = invoice_to_pay.amount_milli_satoshis().unwrap();
+
+               let retry = TestRouter::retry_for_invoice(&invoice_to_pay);
+               let payer = TestPayer::new()
+                       .fails_with_partial_failure(
+                               retry.clone(), OnAttempt(1),
+                               Some(vec![
+                                       Ok(()), Err(MonitorUpdateFailed)
+                               ]))
+                       .expect_send(Amount::ForInvoice(final_value_msat));
+
+               let router = TestRouter {};
+               let scorer = RefCell::new(TestScorer::new());
+               let logger = TestLogger::new();
+               let invoice_payer =
+                       InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(0));
+
+               invoice_payer.pay_invoice(&invoice_to_pay).unwrap();
+               let inflight_map = invoice_payer.create_inflight_map();
+
+               // All paths successful, hence we check for the existence of all 5 hops.
+               assert_eq!(inflight_map.len(), 5);
+       }
+
        struct TestRouter;
 
        impl TestRouter {
                fn route_for_value(final_value_msat: u64) -> Route {
                        Route {
                                paths: vec![
-                                       vec![RouteHop {
-                                               pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
-                                               channel_features: ChannelFeatures::empty(),
-                                               node_features: NodeFeatures::empty(),
-                                               short_channel_id: 0, fee_msat: final_value_msat / 2, cltv_expiry_delta: 144
-                                       }],
-                                       vec![RouteHop {
-                                               pubkey: PublicKey::from_slice(&hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
-                                               channel_features: ChannelFeatures::empty(),
-                                               node_features: NodeFeatures::empty(),
-                                               short_channel_id: 1, fee_msat: final_value_msat / 2, cltv_expiry_delta: 144
-                                       }],
+                                       vec![
+                                               RouteHop {
+                                                       pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
+                                                       channel_features: ChannelFeatures::empty(),
+                                                       node_features: NodeFeatures::empty(),
+                                                       short_channel_id: 0,
+                                                       fee_msat: 10,
+                                                       cltv_expiry_delta: 0
+                                               },
+                                               RouteHop {
+                                                       pubkey: PublicKey::from_slice(&hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
+                                                       channel_features: ChannelFeatures::empty(),
+                                                       node_features: NodeFeatures::empty(),
+                                                       short_channel_id: 1,
+                                                       fee_msat: 20,
+                                                       cltv_expiry_delta: 0
+                                               },
+                                               RouteHop {
+                                                       pubkey: PublicKey::from_slice(&hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
+                                                       channel_features: ChannelFeatures::empty(),
+                                                       node_features: NodeFeatures::empty(),
+                                                       short_channel_id: 2,
+                                                       fee_msat: final_value_msat / 2,
+                                                       cltv_expiry_delta: 0
+                                               },
+                                       ],
+                                       vec![
+                                               RouteHop {
+                                                       pubkey: PublicKey::from_slice(&hex::decode("029e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255").unwrap()[..]).unwrap(),
+                                                       channel_features: ChannelFeatures::empty(),
+                                                       node_features: NodeFeatures::empty(),
+                                                       short_channel_id: 3,
+                                                       fee_msat: 10,
+                                                       cltv_expiry_delta: 144
+                                               },
+                                               RouteHop {
+                                                       pubkey: PublicKey::from_slice(&hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
+                                                       channel_features: ChannelFeatures::empty(),
+                                                       node_features: NodeFeatures::empty(),
+                                                       short_channel_id: 4,
+                                                       fee_msat: final_value_msat / 2,
+                                                       cltv_expiry_delta: 144
+                                               }
+                                       ],
                                ],
                                payment_params: None,
                        }
@@ -1407,11 +1841,33 @@ mod tests {
                }
        }
 
-       impl<S: Score> Router<S> for TestRouter {
-               fn find_route(
-                       &self, _payer: &PublicKey, route_params: &RouteParameters, _payment_hash: &PaymentHash,
-                       _first_hops: Option<&[&ChannelDetails]>, _scorer: &S
+       impl Router for TestRouter {
+               fn find_route<S: Score>(
+                       &self, payer: &PublicKey, route_params: &RouteParameters, _payment_hash: &PaymentHash,
+                       _first_hops: Option<&[&ChannelDetails]>, scorer: &S
                ) -> Result<Route, LightningError> {
+                       // Simulate calling the Scorer just as you would in find_route
+                       let route = Self::route_for_value(route_params.final_value_msat);
+                       for path in route.paths {
+                               let mut aggregate_msat = 0u64;
+                               for (idx, hop) in path.iter().rev().enumerate() {
+                                       aggregate_msat += hop.fee_msat;
+                                       let usage = ChannelUsage {
+                                               amount_msat: aggregate_msat,
+                                               inflight_htlc_msat: 0,
+                                               effective_capacity: EffectiveCapacity::Unknown,
+                                       };
+
+                                       // Since the path is reversed, the last element in our iteration is the first
+                                       // hop.
+                                       if idx == path.len() - 1 {
+                                               scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage);
+                                       } else {
+                                               scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path[idx + 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage);
+                                       }
+                               }
+                       }
+
                        Ok(Route {
                                payment_params: Some(route_params.payment_params.clone()), ..Self::route_for_value(route_params.final_value_msat)
                        })
@@ -1420,8 +1876,8 @@ mod tests {
 
        struct FailingRouter;
 
-       impl<S: Score> Router<S> for FailingRouter {
-               fn find_route(
+       impl Router for FailingRouter {
+               fn find_route<S: Score>(
                        &self, _payer: &PublicKey, _params: &RouteParameters, _payment_hash: &PaymentHash,
                        _first_hops: Option<&[&ChannelDetails]>, _scorer: &S
                ) -> Result<Route, LightningError> {
@@ -1430,26 +1886,31 @@ mod tests {
        }
 
        struct TestScorer {
-               expectations: Option<VecDeque<TestResult>>,
+               event_expectations: Option<VecDeque<TestResult>>,
+               scorer_expectations: RefCell<Option<VecDeque<ChannelUsage>>>,
        }
 
        #[derive(Debug)]
        enum TestResult {
                PaymentFailure { path: Vec<RouteHop>, short_channel_id: u64 },
                PaymentSuccess { path: Vec<RouteHop> },
-               ProbeFailure { path: Vec<RouteHop>, short_channel_id: u64 },
-               ProbeSuccess { path: Vec<RouteHop> },
        }
 
        impl TestScorer {
                fn new() -> Self {
                        Self {
-                               expectations: None,
+                               event_expectations: None,
+                               scorer_expectations: RefCell::new(None),
                        }
                }
 
                fn expect(mut self, expectation: TestResult) -> Self {
-                       self.expectations.get_or_insert_with(|| VecDeque::new()).push_back(expectation);
+                       self.event_expectations.get_or_insert_with(|| VecDeque::new()).push_back(expectation);
+                       self
+               }
+
+               fn expect_usage(self, expectation: ChannelUsage) -> Self {
+                       self.scorer_expectations.borrow_mut().get_or_insert_with(|| VecDeque::new()).push_back(expectation);
                        self
                }
        }
@@ -1461,11 +1922,22 @@ mod tests {
 
        impl Score for TestScorer {
                fn channel_penalty_msat(
-                       &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
-               ) -> u64 { 0 }
+                       &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, usage: ChannelUsage
+               ) -> u64 {
+                       if let Some(scorer_expectations) = self.scorer_expectations.borrow_mut().as_mut() {
+                               match scorer_expectations.pop_front() {
+                                       Some(expectation) => {
+                                               assert_eq!(expectation.amount_msat, usage.amount_msat);
+                                               assert_eq!(expectation.inflight_htlc_msat, usage.inflight_htlc_msat);
+                                       },
+                                       None => {},
+                               }
+                       }
+                       0
+               }
 
                fn payment_path_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) {
-                       if let Some(expectations) = &mut self.expectations {
+                       if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front() {
                                        Some(TestResult::PaymentFailure { path, short_channel_id }) => {
                                                assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
@@ -1474,19 +1946,13 @@ mod tests {
                                        Some(TestResult::PaymentSuccess { path }) => {
                                                panic!("Unexpected successful payment path: {:?}", path)
                                        },
-                                       Some(TestResult::ProbeFailure { path, .. }) => {
-                                               panic!("Unexpected failed payment probe: {:?}", path)
-                                       },
-                                       Some(TestResult::ProbeSuccess { path }) => {
-                                               panic!("Unexpected successful payment probe: {:?}", path)
-                                       },
                                        None => panic!("Unexpected payment_path_failed call: {:?}", actual_path),
                                }
                        }
                }
 
                fn payment_path_successful(&mut self, actual_path: &[&RouteHop]) {
-                       if let Some(expectations) = &mut self.expectations {
+                       if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front() {
                                        Some(TestResult::PaymentFailure { path, .. }) => {
                                                panic!("Unexpected payment path failure: {:?}", path)
@@ -1494,19 +1960,13 @@ mod tests {
                                        Some(TestResult::PaymentSuccess { path }) => {
                                                assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
                                        },
-                                       Some(TestResult::ProbeFailure { path, .. }) => {
-                                               panic!("Unexpected failed payment probe: {:?}", path)
-                                       },
-                                       Some(TestResult::ProbeSuccess { path }) => {
-                                               panic!("Unexpected successful payment probe: {:?}", path)
-                                       },
                                        None => panic!("Unexpected payment_path_successful call: {:?}", actual_path),
                                }
                        }
                }
 
-               fn probe_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) {
-                       if let Some(expectations) = &mut self.expectations {
+               fn probe_failed(&mut self, actual_path: &[&RouteHop], _: u64) {
+                       if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front() {
                                        Some(TestResult::PaymentFailure { path, .. }) => {
                                                panic!("Unexpected failed payment path: {:?}", path)
@@ -1514,19 +1974,12 @@ mod tests {
                                        Some(TestResult::PaymentSuccess { path }) => {
                                                panic!("Unexpected successful payment path: {:?}", path)
                                        },
-                                       Some(TestResult::ProbeFailure { path, short_channel_id }) => {
-                                               assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
-                                               assert_eq!(actual_short_channel_id, short_channel_id);
-                                       },
-                                       Some(TestResult::ProbeSuccess { path }) => {
-                                               panic!("Unexpected successful payment probe: {:?}", path)
-                                       },
                                        None => panic!("Unexpected payment_path_failed call: {:?}", actual_path),
                                }
                        }
                }
                fn probe_successful(&mut self, actual_path: &[&RouteHop]) {
-                       if let Some(expectations) = &mut self.expectations {
+                       if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front() {
                                        Some(TestResult::PaymentFailure { path, .. }) => {
                                                panic!("Unexpected payment path failure: {:?}", path)
@@ -1534,12 +1987,6 @@ mod tests {
                                        Some(TestResult::PaymentSuccess { path }) => {
                                                panic!("Unexpected successful payment path: {:?}", path)
                                        },
-                                       Some(TestResult::ProbeFailure { path, .. }) => {
-                                               panic!("Unexpected failed payment probe: {:?}", path)
-                                       },
-                                       Some(TestResult::ProbeSuccess { path }) => {
-                                               assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
-                                       },
                                        None => panic!("Unexpected payment_path_successful call: {:?}", actual_path),
                                }
                        }
@@ -1552,9 +1999,15 @@ mod tests {
                                return;
                        }
 
-                       if let Some(expectations) = &self.expectations {
-                               if !expectations.is_empty() {
-                                       panic!("Unsatisfied scorer expectations: {:?}", expectations);
+                       if let Some(event_expectations) = &self.event_expectations {
+                               if !event_expectations.is_empty() {
+                                       panic!("Unsatisfied event expectations: {:?}", event_expectations);
+                               }
+                       }
+
+                       if let Some(scorer_expectations) = self.scorer_expectations.borrow().as_ref() {
+                               if !scorer_expectations.is_empty() {
+                                       panic!("Unsatisfied scorer expectations: {:?}", scorer_expectations)
                                }
                        }
                }
@@ -1594,9 +2047,9 @@ mod tests {
                        self.fails_with(failure, OnAttempt(attempt))
                }
 
-               fn fails_with_partial_failure(self, retry: RouteParameters, attempt: OnAttempt) -> Self {
+               fn fails_with_partial_failure(self, retry: RouteParameters, attempt: OnAttempt, results: Option<Vec<Result<(), APIError>>>) -> Self {
                        self.fails_with(PaymentSendFailure::PartialFailure {
-                               results: vec![],
+                               results: results.unwrap_or(vec![]),
                                failed_paths_retry: Some(retry),
                                payment_id: PaymentId([1; 32]),
                        }, attempt)
@@ -1677,8 +2130,8 @@ mod tests {
        // *** Full Featured Functional Tests with a Real ChannelManager ***
        struct ManualRouter(RefCell<VecDeque<Result<Route, LightningError>>>);
 
-       impl<S: Score> Router<S> for ManualRouter {
-               fn find_route(
+       impl Router for ManualRouter {
+               fn find_route<S: Score>(
                        &self, _payer: &PublicKey, _params: &RouteParameters, _payment_hash: &PaymentHash,
                        _first_hops: Option<&[&ChannelDetails]>, _scorer: &S
                ) -> Result<Route, LightningError> {
index 9ef2d2c8d39214e5a94423fa0c3e98006da9051c..72d022eeab123e228427f1fee48c2125661b0f0b 100644 (file)
@@ -455,9 +455,9 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> DefaultRouter<G, L> where L::
        }
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Score> Router<S> for DefaultRouter<G, L>
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Router for DefaultRouter<G, L>
 where L::Target: Logger {
-       fn find_route(
+       fn find_route<S: Score>(
                &self, payer: &PublicKey, params: &RouteParameters, _payment_hash: &PaymentHash,
                first_hops: Option<&[&ChannelDetails]>, scorer: &S
        ) -> Result<Route, LightningError> {
index dd7770cbe8cdd65145e5ce663c45f2143754835e..a3e0cbb45860a31ab91d942cd7a8af9e858bc238 100644 (file)
@@ -15,7 +15,7 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-bitcoin = "0.28.1"
+bitcoin = "0.29.0"
 lightning = { version = "0.0.110", path = "../lightning" }
 tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
 
index 645a7434e4536534138c5dd6150fe52bad55ff0c..7dfa38e95e82f150f73c939b79bb13dc50080043 100644 (file)
 //! }
 //! ```
 
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
 #![deny(broken_intra_doc_links)]
-#![deny(missing_docs)]
+#![deny(private_intra_doc_links)]
 
+#![deny(missing_docs)]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 
 use bitcoin::secp256k1::PublicKey;
@@ -81,7 +83,7 @@ use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
 use lightning::ln::peer_handler::CustomMessageHandler;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
+use lightning::ln::msgs::{ChannelMessageHandler, NetAddress, OnionMessageHandler, RoutingMessageHandler};
 use lightning::util::logger::Logger;
 
 use std::ops::Deref;
@@ -121,13 +123,15 @@ struct Connection {
        id: u64,
 }
 impl Connection {
-       async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, mut event_receiver: mpsc::Receiver<()>) where
+       async fn poll_event_process<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, mut event_receiver: mpsc::Receiver<()>) where
                        CMH: Deref + 'static + Send + Sync,
                        RMH: Deref + 'static + Send + Sync,
+                       OMH: Deref + 'static + Send + Sync,
                        L: Deref + 'static + Send + Sync,
                        UMH: Deref + 'static + Send + Sync,
                        CMH::Target: ChannelMessageHandler + Send + Sync,
                        RMH::Target: RoutingMessageHandler + Send + Sync,
+                       OMH::Target: OnionMessageHandler + Send + Sync,
                        L::Target: Logger + Send + Sync,
                        UMH::Target: CustomMessageHandler + Send + Sync,
     {
@@ -139,13 +143,15 @@ impl Connection {
                }
        }
 
-       async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+       async fn schedule_read<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
                        CMH: Deref + 'static + Send + Sync,
                        RMH: Deref + 'static + Send + Sync,
+                       OMH: Deref + 'static + Send + Sync,
                        L: Deref + 'static + Send + Sync,
                        UMH: Deref + 'static + Send + Sync,
                        CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
                        RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
+                       OMH::Target: OnionMessageHandler + 'static + Send + Sync,
                        L::Target: Logger + 'static + Send + Sync,
                        UMH::Target: CustomMessageHandler + 'static + Send + Sync,
         {
@@ -266,13 +272,15 @@ fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_inbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
                CMH: Deref + 'static + Send + Sync,
                RMH: Deref + 'static + Send + Sync,
+               OMH: Deref + 'static + Send + Sync,
                L: Deref + 'static + Send + Sync,
                UMH: Deref + 'static + Send + Sync,
                CMH::Target: ChannelMessageHandler + Send + Sync,
                RMH::Target: RoutingMessageHandler + Send + Sync,
+               OMH::Target: OnionMessageHandler + Send + Sync,
                L::Target: Logger + Send + Sync,
                UMH::Target: CustomMessageHandler + Send + Sync,
 {
@@ -313,13 +321,15 @@ pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManag
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
                CMH: Deref + 'static + Send + Sync,
                RMH: Deref + 'static + Send + Sync,
+               OMH: Deref + 'static + Send + Sync,
                L: Deref + 'static + Send + Sync,
                UMH: Deref + 'static + Send + Sync,
                CMH::Target: ChannelMessageHandler + Send + Sync,
                RMH::Target: RoutingMessageHandler + Send + Sync,
+               OMH::Target: OnionMessageHandler + Send + Sync,
                L::Target: Logger + Send + Sync,
                UMH::Target: CustomMessageHandler + Send + Sync,
 {
@@ -389,13 +399,15 @@ pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerMana
 /// disconnected and associated handling futures are freed, though, because all processing in said
 /// futures are spawned with tokio::spawn, you do not need to poll the second future in order to
 /// make progress.
-pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
+pub async fn connect_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
                CMH: Deref + 'static + Send + Sync,
                RMH: Deref + 'static + Send + Sync,
+               OMH: Deref + 'static + Send + Sync,
                L: Deref + 'static + Send + Sync,
                UMH: Deref + 'static + Send + Sync,
                CMH::Target: ChannelMessageHandler + Send + Sync,
                RMH::Target: RoutingMessageHandler + Send + Sync,
+               OMH::Target: OnionMessageHandler + Send + Sync,
                L::Target: Logger + Send + Sync,
                UMH::Target: CustomMessageHandler + Send + Sync,
 {
@@ -562,8 +574,8 @@ mod tests {
                fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
                fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
                fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
-               fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { Vec::new() }
-               fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
+               fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
+               fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement> { None }
                fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
                fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
                fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
@@ -644,6 +656,7 @@ mod tests {
                let a_manager = Arc::new(PeerManager::new(MessageHandler {
                        chan_handler: Arc::clone(&a_handler),
                        route_handler: Arc::clone(&a_handler),
+                       onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
                }, a_key.clone(), &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
 
                let (b_connected_sender, mut b_connected) = mpsc::channel(1);
@@ -658,6 +671,7 @@ mod tests {
                let b_manager = Arc::new(PeerManager::new(MessageHandler {
                        chan_handler: Arc::clone(&b_handler),
                        route_handler: Arc::clone(&b_handler),
+                       onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
                }, b_key.clone(), &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
 
                // We bind on localhost, hoping the environment is properly configured with a local
@@ -709,6 +723,7 @@ mod tests {
 
                let a_manager = Arc::new(PeerManager::new(MessageHandler {
                        chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
+                       onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
                        route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
                }, a_key, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
 
index 7de0ddc153c807c9a97016dfb6f1feaa4b1a5fdb..728743c8a4da2b35629c17ef93ebf33b156cd4df 100644 (file)
@@ -16,7 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 _bench_unstable = ["lightning/_bench_unstable"]
 
 [dependencies]
-bitcoin = "0.28.1"
+bitcoin = "0.29.0"
 lightning = { version = "0.0.110", path = "../lightning" }
 libc = "0.2"
 
index 3e32791711e1428746eaadef6f26f11248370439..b277c30ca8cfc9024a6de613f5ad7fea4b7d65c1 100644 (file)
@@ -1,6 +1,9 @@
 //! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs.
 
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
 #![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
 #![deny(missing_docs)]
 
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
@@ -134,7 +137,7 @@ mod tests {
        use crate::FilesystemPersister;
        use bitcoin::blockdata::block::{Block, BlockHeader};
        use bitcoin::hashes::hex::FromHex;
-       use bitcoin::Txid;
+       use bitcoin::{Txid, TxMerkleNode};
        use lightning::chain::ChannelMonitorUpdateErr;
        use lightning::chain::chainmonitor::Persist;
        use lightning::chain::transaction::OutPoint;
@@ -144,6 +147,7 @@ mod tests {
        use lightning::util::events::{ClosureReason, MessageSendEventsProvider};
        use lightning::util::test_utils;
        use std::fs;
+       use bitcoin::hashes::Hash;
        #[cfg(target_os = "windows")]
        use {
                lightning::get_event_msg,
@@ -221,7 +225,7 @@ mod tests {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 1);
 
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
                connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
                check_closed_broadcast!(nodes[1], true);
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
index 39518bbbebd672ac6f3960288d48e8118be05ac2..b8bc8437bca04f2c96e08a12f5464635195c6993 100644 (file)
@@ -14,7 +14,7 @@ _bench_unstable = []
 
 [dependencies]
 lightning = { version = "0.0.110", path = "../lightning" }
-bitcoin = { version = "0.28.1", default-features = false }
+bitcoin = { version = "0.29.0", default-features = false }
 
 [dev-dependencies]
 lightning = { version = "0.0.110", path = "../lightning", features = ["_test_utils"] }
index 6e9280f86a3f85909c8c25e49fc254bcc3a06819..70758e1fe07e6a5ae0f5a430ac374ee3ae5708be 100644 (file)
@@ -1,6 +1,9 @@
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
+#![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
 #![deny(missing_docs)]
 #![deny(unsafe_code)]
-#![deny(broken_intra_doc_links)]
 #![deny(non_upper_case_globals)]
 #![deny(non_camel_case_types)]
 #![deny(non_snake_case)]
index 7980a9b1dc88182c1c7acb2ebd9df40a0afae1ab..fde54661420ec546db00e0ac6ef3d65ef3f19ba8 100644 (file)
@@ -38,7 +38,7 @@ grind_signatures = []
 default = ["std", "grind_signatures"]
 
 [dependencies]
-bitcoin = { version = "0.28.1", default-features = false, features = ["secp-recovery"] }
+bitcoin = { version = "0.29.0", default-features = false, features = ["secp-recovery"] }
 
 hashbrown = { version = "0.11", optional = true }
 hex = { version = "0.4", optional = true }
@@ -52,6 +52,6 @@ hex = "0.4"
 regex = "1.5.6"
 
 [dev-dependencies.bitcoin]
-version = "0.28.1"
+version = "0.29.0"
 default-features = false
 features = ["bitcoinconsensus", "secp-recovery"]
index 5c4ede0b16161819a8c7b54f4b8696cb5274912a..3d84fdf93a52f391200fde732b0954e77e01d724 100644 (file)
@@ -263,82 +263,67 @@ where C::Target: chain::Filter,
        where
                FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
        {
-               let mut dependent_txdata = Vec::new();
-               {
-                       let monitor_states = self.monitors.write().unwrap();
-                       if let Some(height) = best_height {
-                               // If the best block height is being updated, update highest_chain_height under the
-                               // monitors write lock.
-                               let old_height = self.highest_chain_height.load(Ordering::Acquire);
-                               let new_height = height as usize;
-                               if new_height > old_height {
-                                       self.highest_chain_height.store(new_height, Ordering::Release);
-                               }
+               let monitor_states = self.monitors.write().unwrap();
+               if let Some(height) = best_height {
+                       // If the best block height is being updated, update highest_chain_height under the
+                       // monitors write lock.
+                       let old_height = self.highest_chain_height.load(Ordering::Acquire);
+                       let new_height = height as usize;
+                       if new_height > old_height {
+                               self.highest_chain_height.store(new_height, Ordering::Release);
                        }
+               }
 
-                       for (funding_outpoint, monitor_state) in monitor_states.iter() {
-                               let monitor = &monitor_state.monitor;
-                               let mut txn_outputs;
-                               {
-                                       txn_outputs = process(monitor, txdata);
-                                       let update_id = MonitorUpdateId {
-                                               contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
-                                       };
-                                       let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
-                                       if let Some(height) = best_height {
-                                               if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
-                                                       // If there are not ChainSync persists awaiting completion, go ahead and
-                                                       // set last_chain_persist_height here - we wouldn't want the first
-                                                       // TemporaryFailure to always immediately be considered "overly delayed".
-                                                       monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
-                                               }
+               for (funding_outpoint, monitor_state) in monitor_states.iter() {
+                       let monitor = &monitor_state.monitor;
+                       let mut txn_outputs;
+                       {
+                               txn_outputs = process(monitor, txdata);
+                               let update_id = MonitorUpdateId {
+                                       contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
+                               };
+                               let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
+                               if let Some(height) = best_height {
+                                       if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
+                                               // If there are not ChainSync persists awaiting completion, go ahead and
+                                               // set last_chain_persist_height here - we wouldn't want the first
+                                               // TemporaryFailure to always immediately be considered "overly delayed".
+                                               monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
                                        }
+                               }
 
-                                       log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
-                                       match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) {
-                                               Ok(()) =>
-                                                       log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
-                                               Err(ChannelMonitorUpdateErr::PermanentFailure) => {
-                                                       monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                                                       self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
-                                               },
-                                               Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
-                                                       log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
-                                                       pending_monitor_updates.push(update_id);
-                                               },
-                                       }
+                               log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+                               match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) {
+                                       Ok(()) =>
+                                               log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
+                                       Err(ChannelMonitorUpdateErr::PermanentFailure) => {
+                                               monitor_state.channel_perm_failed.store(true, Ordering::Release);
+                                               self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
+                                       },
+                                       Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
+                                               log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
+                                               pending_monitor_updates.push(update_id);
+                                       },
                                }
+                       }
 
-                               // Register any new outputs with the chain source for filtering, storing any dependent
-                               // transactions from within the block that previously had not been included in txdata.
-                               if let Some(ref chain_source) = self.chain_source {
-                                       let block_hash = header.block_hash();
-                                       for (txid, mut outputs) in txn_outputs.drain(..) {
-                                               for (idx, output) in outputs.drain(..) {
-                                                       // Register any new outputs with the chain source for filtering and recurse
-                                                       // if it indicates that there are dependent transactions within the block
-                                                       // that had not been previously included in txdata.
-                                                       let output = WatchedOutput {
-                                                               block_hash: Some(block_hash),
-                                                               outpoint: OutPoint { txid, index: idx as u16 },
-                                                               script_pubkey: output.script_pubkey,
-                                                       };
-                                                       if let Some(tx) = chain_source.register_output(output) {
-                                                               dependent_txdata.push(tx);
-                                                       }
-                                               }
+                       // Register any new outputs with the chain source for filtering, storing any dependent
+                       // transactions from within the block that previously had not been included in txdata.
+                       if let Some(ref chain_source) = self.chain_source {
+                               let block_hash = header.block_hash();
+                               for (txid, mut outputs) in txn_outputs.drain(..) {
+                                       for (idx, output) in outputs.drain(..) {
+                                               // Register any new outputs with the chain source for filtering
+                                               let output = WatchedOutput {
+                                                       block_hash: Some(block_hash),
+                                                       outpoint: OutPoint { txid, index: idx as u16 },
+                                                       script_pubkey: output.script_pubkey,
+                                               };
+                                               chain_source.register_output(output)
                                        }
                                }
                        }
                }
-
-               // Recursively call for any dependent transactions that were identified by the chain source.
-               if !dependent_txdata.is_empty() {
-                       dependent_txdata.sort_unstable_by_key(|(index, _tx)| *index);
-                       dependent_txdata.dedup_by_key(|(index, _tx)| *index);
-                       let txdata: Vec<_> = dependent_txdata.iter().map(|(index, tx)| (*index, tx)).collect();
-                       self.process_chain_data(header, None, &txdata, process); // We skip the best height the second go-around
-               }
        }
 
        /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
@@ -733,7 +718,8 @@ impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> even
 
 #[cfg(test)]
 mod tests {
-       use bitcoin::BlockHeader;
+       use bitcoin::{BlockHeader, TxMerkleNode};
+       use bitcoin::hashes::Hash;
        use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
        use ::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
        use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
@@ -745,50 +731,6 @@ mod tests {
        use ln::msgs::ChannelMessageHandler;
        use util::errors::APIError;
        use util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
-       use util::test_utils::{OnRegisterOutput, TxOutReference};
-
-       /// Tests that in-block dependent transactions are processed by `block_connected` when not
-       /// included in `txdata` but returned by [`chain::Filter::register_output`]. For instance,
-       /// a (non-anchor) commitment transaction's HTLC output may be spent in the same block as the
-       /// commitment transaction itself. An Electrum client may filter the commitment transaction but
-       /// needs to return the HTLC transaction so it can be processed.
-       #[test]
-       fn connect_block_checks_dependent_transactions() {
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               let channel = create_announced_chan_between_nodes(
-                       &nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-
-               // Send a payment, saving nodes[0]'s revoked commitment and HTLC-Timeout transactions.
-               let (commitment_tx, htlc_tx) = {
-                       let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000).0;
-                       let mut txn = get_local_commitment_txn!(nodes[0], channel.2);
-                       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
-
-                       assert_eq!(txn.len(), 2);
-                       (txn.remove(0), txn.remove(0))
-               };
-
-               // Set expectations on nodes[1]'s chain source to return dependent transactions.
-               let htlc_output = TxOutReference(commitment_tx.clone(), 0);
-               let to_local_output = TxOutReference(commitment_tx.clone(), 1);
-               let htlc_timeout_output = TxOutReference(htlc_tx.clone(), 0);
-               nodes[1].chain_source
-                       .expect(OnRegisterOutput { with: htlc_output, returns: Some((1, htlc_tx)) })
-                       .expect(OnRegisterOutput { with: to_local_output, returns: None })
-                       .expect(OnRegisterOutput { with: htlc_timeout_output, returns: None });
-
-               // Notify nodes[1] that nodes[0]'s revoked commitment transaction was mined. The chain
-               // source should return the dependent HTLC transaction when the HTLC output is registered.
-               mine_transaction(&nodes[1], &commitment_tx);
-
-               // Clean up so uninteresting assertions don't fail.
-               check_added_monitors!(nodes[1], 1);
-               nodes[1].node.get_and_clear_pending_msg_events();
-               nodes[1].node.get_and_clear_pending_events();
-       }
 
        #[test]
        fn test_async_ooo_offchain_updates() {
@@ -900,7 +842,7 @@ mod tests {
                let new_header = BlockHeader {
                        version: 2, time: 0, bits: 0, nonce: 0,
                        prev_blockhash: nodes[0].best_block_info().0,
-                       merkle_root: Default::default() };
+                       merkle_root: TxMerkleNode::all_zeros() };
                nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
                        &[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
                assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
@@ -926,7 +868,7 @@ mod tests {
                        let latest_header = BlockHeader {
                                version: 2, time: 0, bits: 0, nonce: 0,
                                prev_blockhash: nodes[0].best_block_info().0,
-                               merkle_root: Default::default() };
+                               merkle_root: TxMerkleNode::all_zeros() };
                        nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
                } else {
                        let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
index 38dcd6cf0e70daf2457800fc8fc3d7ee68d16247..52cfe9972f7289388caa38d8b7244c7ec6b905bc 100644 (file)
@@ -22,6 +22,7 @@
 
 use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::{TxOut,Transaction};
+use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
 use bitcoin::blockdata::script::{Script, Builder};
 use bitcoin::blockdata::opcodes;
 
@@ -54,11 +55,17 @@ use util::events::Event;
 use prelude::*;
 use core::{cmp, mem};
 use io::{self, Error};
+use core::convert::TryInto;
 use core::ops::Deref;
 use sync::Mutex;
 
-/// An update generated by the underlying Channel itself which contains some new information the
-/// ChannelMonitor should be made aware of.
+/// An update generated by the underlying channel itself which contains some new information the
+/// [`ChannelMonitor`] should be made aware of.
+///
+/// Because this represents only a small number of updates to the underlying state, it is generally
+/// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
+/// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
+/// transaction), a single update may reach upwards of 1 MiB in serialized size.
 #[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq))]
 #[derive(Clone)]
 #[must_use]
@@ -315,6 +322,7 @@ struct OnchainEventEntry {
        txid: Txid,
        height: u32,
        event: OnchainEvent,
+       transaction: Option<Transaction>, // Added as optional, but always filled in, in LDK 0.0.110
 }
 
 impl OnchainEventEntry {
@@ -344,6 +352,11 @@ impl OnchainEventEntry {
        }
 }
 
+/// The (output index, sats value) for the counterparty's output in a commitment transaction.
+///
+/// This was added as an `Option` in 0.0.110.
+type CommitmentTxCounterpartyOutputInfo = Option<(u32, u64)>;
+
 /// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
 /// once they mature to enough confirmations (ANTI_REORG_DELAY)
 #[derive(PartialEq)]
@@ -362,6 +375,8 @@ enum OnchainEvent {
                /// transaction which appeared on chain.
                commitment_tx_output_idx: Option<u32>,
        },
+       /// An output waiting on [`ANTI_REORG_DELAY`] confirmations before we hand the user the
+       /// [`SpendableOutputDescriptor`].
        MaturingOutput {
                descriptor: SpendableOutputDescriptor,
        },
@@ -371,6 +386,12 @@ enum OnchainEvent {
                /// The CSV delay for the output of the funding spend transaction (implying it is a local
                /// commitment transaction, and this is the delay on the to_self output).
                on_local_output_csv: Option<u16>,
+               /// If the funding spend transaction was a known remote commitment transaction, we track
+               /// the output index and amount of the counterparty's `to_self` output here.
+               ///
+               /// This allows us to generate a [`Balance::CounterpartyRevokedOutputClaimable`] for the
+               /// counterparty output.
+               commitment_tx_to_counterparty_output: CommitmentTxCounterpartyOutputInfo,
        },
        /// A spend of a commitment transaction HTLC output, set in the cases where *no* `HTLCUpdate`
        /// is constructed. This is used when
@@ -380,6 +401,9 @@ enum OnchainEvent {
        ///  * an inbound HTLC is claimed by us (with a preimage).
        ///  * a revoked-state HTLC transaction was broadcasted, which was claimed by the revocation
        ///    signature.
+       ///  * a revoked-state HTLC transaction was broadcasted, which was claimed by an
+       ///    HTLC-Success/HTLC-Failure transaction (and is still claimable with a revocation
+       ///    signature).
        HTLCSpendConfirmation {
                commitment_tx_output_idx: u32,
                /// If the claim was made by either party with a preimage, this is filled in
@@ -395,6 +419,7 @@ impl Writeable for OnchainEventEntry {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                write_tlv_fields!(writer, {
                        (0, self.txid, required),
+                       (1, self.transaction, option),
                        (2, self.height, required),
                        (4, self.event, required),
                });
@@ -404,16 +429,18 @@ impl Writeable for OnchainEventEntry {
 
 impl MaybeReadable for OnchainEventEntry {
        fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
-               let mut txid = Default::default();
+               let mut txid = Txid::all_zeros();
+               let mut transaction = None;
                let mut height = 0;
                let mut event = None;
                read_tlv_fields!(reader, {
                        (0, txid, required),
+                       (1, transaction, option),
                        (2, height, required),
                        (4, event, ignorable),
                });
                if let Some(ev) = event {
-                       Ok(Some(Self { txid, height, event: ev }))
+                       Ok(Some(Self { txid, transaction, height, event: ev }))
                } else {
                        Ok(None)
                }
@@ -432,6 +459,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
        },
        (3, FundingSpendConfirmation) => {
                (0, on_local_output_csv, option),
+               (1, commitment_tx_to_counterparty_output, option),
        },
        (5, HTLCSpendConfirmation) => {
                (0, commitment_tx_output_idx, required),
@@ -555,26 +583,54 @@ pub enum Balance {
        /// HTLCs which we sent to our counterparty which are claimable after a timeout (less on-chain
        /// fees) if the counterparty does not know the preimage for the HTLCs. These are somewhat
        /// likely to be claimed by our counterparty before we do.
-       MaybeClaimableHTLCAwaitingTimeout {
-               /// The amount available to claim, in satoshis, excluding the on-chain fees which will be
-               /// required to do so.
+       MaybeTimeoutClaimableHTLC {
+               /// The amount potentially available to claim, in satoshis, excluding the on-chain fees
+               /// which will be required to do so.
                claimable_amount_satoshis: u64,
                /// The height at which we will be able to claim the balance if our counterparty has not
                /// done so.
                claimable_height: u32,
        },
+       /// HTLCs which we received from our counterparty which are claimable with a preimage which we
+       /// do not currently have. This will only be claimable if we receive the preimage from the node
+       /// to which we forwarded this HTLC before the timeout.
+       MaybePreimageClaimableHTLC {
+               /// The amount potentially available to claim, in satoshis, excluding the on-chain fees
+               /// which will be required to do so.
+               claimable_amount_satoshis: u64,
+               /// The height at which our counterparty will be able to claim the balance if we have not
+               /// yet received the preimage and claimed it ourselves.
+               expiry_height: u32,
+       },
+       /// The channel has been closed, and our counterparty broadcasted a revoked commitment
+       /// transaction.
+       ///
+       /// Thus, we're able to claim all outputs in the commitment transaction, one of which has the
+       /// following amount.
+       CounterpartyRevokedOutputClaimable {
+               /// The amount, in satoshis, of the output which we can claim.
+               ///
+               /// Note that for outputs from HTLC balances this may be excluding some on-chain fees that
+               /// were already spent.
+               claimable_amount_satoshis: u64,
+       },
 }
 
 /// An HTLC which has been irrevocably resolved on-chain, and has reached ANTI_REORG_DELAY.
 #[derive(PartialEq)]
 struct IrrevocablyResolvedHTLC {
        commitment_tx_output_idx: u32,
+       /// The txid of the transaction which resolved the HTLC; this may be a commitment transaction
+       /// (if the HTLC was not present in the confirmed commitment transaction), an HTLC-Success, or
+       /// an HTLC-Timeout transaction.
+       resolving_txid: Option<Txid>, // Added as optional, but always filled in, in 0.0.110
        /// Only set if the HTLC claim was ours using a payment preimage
        payment_preimage: Option<PaymentPreimage>,
 }
 
 impl_writeable_tlv_based!(IrrevocablyResolvedHTLC, {
        (0, commitment_tx_output_idx, required),
+       (1, resolving_txid, option),
        (2, payment_preimage, option),
 });
 
@@ -710,6 +766,7 @@ pub(crate) struct ChannelMonitorImpl<Signer: Sign> {
        funding_spend_seen: bool,
 
        funding_spend_confirmed: Option<Txid>,
+       confirmed_commitment_tx_counterparty_output: CommitmentTxCounterpartyOutputInfo,
        /// The set of HTLCs which have been either claimed or failed on chain and have reached
        /// the requisite confirmations on the claim/fail transaction (either ANTI_REORG_DELAY or the
        /// spending CSV for revocable outputs).
@@ -779,6 +836,7 @@ impl<Signer: Sign> PartialEq for ChannelMonitorImpl<Signer> {
                        self.holder_tx_signed != other.holder_tx_signed ||
                        self.funding_spend_seen != other.funding_spend_seen ||
                        self.funding_spend_confirmed != other.funding_spend_confirmed ||
+                       self.confirmed_commitment_tx_counterparty_output != other.confirmed_commitment_tx_counterparty_output ||
                        self.htlcs_resolved_on_chain != other.htlcs_resolved_on_chain
                {
                        false
@@ -958,6 +1016,7 @@ impl<Signer: Sign> Writeable for ChannelMonitorImpl<Signer> {
                        (5, self.pending_monitor_events, vec_type),
                        (7, self.funding_spend_seen, required),
                        (9, self.counterparty_node_id, option),
+                       (11, self.confirmed_commitment_tx_counterparty_output, option),
                });
 
                Ok(())
@@ -1064,6 +1123,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                        holder_tx_signed: false,
                        funding_spend_seen: false,
                        funding_spend_confirmed: None,
+                       confirmed_commitment_tx_counterparty_output: None,
                        htlcs_resolved_on_chain: Vec::new(),
 
                        best_block,
@@ -1214,7 +1274,11 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                self.inner.lock().unwrap().get_cur_holder_commitment_number()
        }
 
-       pub(crate) fn get_counterparty_node_id(&self) -> Option<PublicKey> {
+       /// Gets the `node_id` of the counterparty for this channel.
+       ///
+       /// Will be `None` for channels constructed on LDK versions prior to 0.0.110 and always `Some`
+       /// otherwise.
+       pub fn get_counterparty_node_id(&self) -> Option<PublicKey> {
                self.inner.lock().unwrap().counterparty_node_id
        }
 
@@ -1379,7 +1443,155 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
        pub fn current_best_block(&self) -> BestBlock {
                self.inner.lock().unwrap().best_block.clone()
        }
+}
+
+impl<Signer: Sign> ChannelMonitorImpl<Signer> {
+       /// Helper for get_claimable_balances which does the work for an individual HTLC, generating up
+       /// to one `Balance` for the HTLC.
+       fn get_htlc_balance(&self, htlc: &HTLCOutputInCommitment, holder_commitment: bool,
+               counterparty_revoked_commitment: bool, confirmed_txid: Option<Txid>)
+       -> Option<Balance> {
+               let htlc_commitment_tx_output_idx =
+                       if let Some(v) = htlc.transaction_output_index { v } else { return None; };
+
+               let mut htlc_spend_txid_opt = None;
+               let mut holder_timeout_spend_pending = None;
+               let mut htlc_spend_pending = None;
+               let mut holder_delayed_output_pending = None;
+               for event in self.onchain_events_awaiting_threshold_conf.iter() {
+                       match event.event {
+                               OnchainEvent::HTLCUpdate { commitment_tx_output_idx, htlc_value_satoshis, .. }
+                               if commitment_tx_output_idx == Some(htlc_commitment_tx_output_idx) => {
+                                       debug_assert!(htlc_spend_txid_opt.is_none());
+                                       htlc_spend_txid_opt = event.transaction.as_ref().map(|tx| tx.txid());
+                                       debug_assert!(holder_timeout_spend_pending.is_none());
+                                       debug_assert_eq!(htlc_value_satoshis.unwrap(), htlc.amount_msat / 1000);
+                                       holder_timeout_spend_pending = Some(event.confirmation_threshold());
+                               },
+                               OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. }
+                               if commitment_tx_output_idx == htlc_commitment_tx_output_idx => {
+                                       debug_assert!(htlc_spend_txid_opt.is_none());
+                                       htlc_spend_txid_opt = event.transaction.as_ref().map(|tx| tx.txid());
+                                       debug_assert!(htlc_spend_pending.is_none());
+                                       htlc_spend_pending = Some((event.confirmation_threshold(), preimage.is_some()));
+                               },
+                               OnchainEvent::MaturingOutput {
+                                       descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(ref descriptor) }
+                               if descriptor.outpoint.index as u32 == htlc_commitment_tx_output_idx => {
+                                       debug_assert!(holder_delayed_output_pending.is_none());
+                                       holder_delayed_output_pending = Some(event.confirmation_threshold());
+                               },
+                               _ => {},
+                       }
+               }
+               let htlc_resolved = self.htlcs_resolved_on_chain.iter()
+                       .find(|v| if v.commitment_tx_output_idx == htlc_commitment_tx_output_idx {
+                               debug_assert!(htlc_spend_txid_opt.is_none());
+                               htlc_spend_txid_opt = v.resolving_txid;
+                               true
+                       } else { false });
+               debug_assert!(holder_timeout_spend_pending.is_some() as u8 + htlc_spend_pending.is_some() as u8 + htlc_resolved.is_some() as u8 <= 1);
+
+               let htlc_output_to_spend =
+                       if let Some(txid) = htlc_spend_txid_opt {
+                               debug_assert!(
+                                       self.onchain_tx_handler.channel_transaction_parameters.opt_anchors.is_none(),
+                                       "This code needs updating for anchors");
+                               BitcoinOutPoint::new(txid, 0)
+                       } else {
+                               BitcoinOutPoint::new(confirmed_txid.unwrap(), htlc_commitment_tx_output_idx)
+                       };
+               let htlc_output_spend_pending = self.onchain_tx_handler.is_output_spend_pending(&htlc_output_to_spend);
+
+               if let Some(conf_thresh) = holder_delayed_output_pending {
+                       debug_assert!(holder_commitment);
+                       return Some(Balance::ClaimableAwaitingConfirmations {
+                               claimable_amount_satoshis: htlc.amount_msat / 1000,
+                               confirmation_height: conf_thresh,
+                       });
+               } else if htlc_resolved.is_some() && !htlc_output_spend_pending {
+                       // Funding transaction spends should be fully confirmed by the time any
+                       // HTLC transactions are resolved, unless we're talking about a holder
+                       // commitment tx, whose resolution is delayed until the CSV timeout is
+                       // reached, even though HTLCs may be resolved after only
+                       // ANTI_REORG_DELAY confirmations.
+                       debug_assert!(holder_commitment || self.funding_spend_confirmed.is_some());
+               } else if counterparty_revoked_commitment {
+                       let htlc_output_claim_pending = self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
+                               if let OnchainEvent::MaturingOutput {
+                                       descriptor: SpendableOutputDescriptor::StaticOutput { .. }
+                               } = &event.event {
+                                       if event.transaction.as_ref().map(|tx| tx.input.iter().any(|inp| {
+                                               if let Some(htlc_spend_txid) = htlc_spend_txid_opt {
+                                                       Some(tx.txid()) == htlc_spend_txid_opt ||
+                                                               inp.previous_output.txid == htlc_spend_txid
+                                               } else {
+                                                       Some(inp.previous_output.txid) == confirmed_txid &&
+                                                               inp.previous_output.vout == htlc_commitment_tx_output_idx
+                                               }
+                                       })).unwrap_or(false) {
+                                               Some(())
+                                       } else { None }
+                               } else { None }
+                       });
+                       if htlc_output_claim_pending.is_some() {
+                               // We already push `Balance`s onto the `res` list for every
+                               // `StaticOutput` in a `MaturingOutput` in the revoked
+                               // counterparty commitment transaction case generally, so don't
+                               // need to do so again here.
+                       } else {
+                               debug_assert!(holder_timeout_spend_pending.is_none(),
+                                       "HTLCUpdate OnchainEvents should never appear for preimage claims");
+                               debug_assert!(!htlc.offered || htlc_spend_pending.is_none() || !htlc_spend_pending.unwrap().1,
+                                       "We don't (currently) generate preimage claims against revoked outputs, where did you get one?!");
+                               return Some(Balance::CounterpartyRevokedOutputClaimable {
+                                       claimable_amount_satoshis: htlc.amount_msat / 1000,
+                               });
+                       }
+               } else if htlc.offered == holder_commitment {
+                       // If the payment was outbound, check if there's an HTLCUpdate
+                       // indicating we have spent this HTLC with a timeout, claiming it back
+                       // and awaiting confirmations on it.
+                       if let Some(conf_thresh) = holder_timeout_spend_pending {
+                               return Some(Balance::ClaimableAwaitingConfirmations {
+                                       claimable_amount_satoshis: htlc.amount_msat / 1000,
+                                       confirmation_height: conf_thresh,
+                               });
+                       } else {
+                               return Some(Balance::MaybeTimeoutClaimableHTLC {
+                                       claimable_amount_satoshis: htlc.amount_msat / 1000,
+                                       claimable_height: htlc.cltv_expiry,
+                               });
+                       }
+               } else if self.payment_preimages.get(&htlc.payment_hash).is_some() {
+                       // Otherwise (the payment was inbound), only expose it as claimable if
+                       // we know the preimage.
+                       // Note that if there is a pending claim, but it did not use the
+                       // preimage, we lost funds to our counterparty! We will then continue
+                       // to show it as ContentiousClaimable until ANTI_REORG_DELAY.
+                       debug_assert!(holder_timeout_spend_pending.is_none());
+                       if let Some((conf_thresh, true)) = htlc_spend_pending {
+                               return Some(Balance::ClaimableAwaitingConfirmations {
+                                       claimable_amount_satoshis: htlc.amount_msat / 1000,
+                                       confirmation_height: conf_thresh,
+                               });
+                       } else {
+                               return Some(Balance::ContentiousClaimable {
+                                       claimable_amount_satoshis: htlc.amount_msat / 1000,
+                                       timeout_height: htlc.cltv_expiry,
+                               });
+                       }
+               } else if htlc_resolved.is_none() {
+                       return Some(Balance::MaybePreimageClaimableHTLC {
+                               claimable_amount_satoshis: htlc.amount_msat / 1000,
+                               expiry_height: htlc.cltv_expiry,
+                       });
+               }
+               None
+       }
+}
 
+impl<Signer: Sign> ChannelMonitor<Signer> {
        /// Gets the balances in this channel which are either claimable by us if we were to
        /// force-close the channel now or which are claimable on-chain (possibly awaiting
        /// confirmation).
@@ -1389,9 +1601,9 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
        /// balance, or until our counterparty has claimed the balance and accrued several
        /// confirmations on the claim transaction.
        ///
-       /// Note that the balances available when you or your counterparty have broadcasted revoked
-       /// state(s) may not be fully captured here.
-       // TODO, fix that ^
+       /// Note that for `ChannelMonitors` which track a channel which went on-chain with versions of
+       /// LDK prior to 0.0.108, balances may not be fully captured if our counterparty broadcasted
+       /// a revoked state.
        ///
        /// See [`Balance`] for additional details on the types of claimable balances which
        /// may be returned here and their meanings.
@@ -1400,9 +1612,13 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                let us = self.inner.lock().unwrap();
 
                let mut confirmed_txid = us.funding_spend_confirmed;
+               let mut confirmed_counterparty_output = us.confirmed_commitment_tx_counterparty_output;
                let mut pending_commitment_tx_conf_thresh = None;
                let funding_spend_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-                       if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
+                       if let OnchainEvent::FundingSpendConfirmation { commitment_tx_to_counterparty_output, .. } =
+                               event.event
+                       {
+                               confirmed_counterparty_output = commitment_tx_to_counterparty_output;
                                Some((event.txid, event.confirmation_threshold()))
                        } else { None }
                });
@@ -1414,71 +1630,12 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                }
 
                macro_rules! walk_htlcs {
-                       ($holder_commitment: expr, $htlc_iter: expr) => {
+                       ($holder_commitment: expr, $counterparty_revoked_commitment: expr, $htlc_iter: expr) => {
                                for htlc in $htlc_iter {
-                                       if let Some(htlc_commitment_tx_output_idx) = htlc.transaction_output_index {
-                                               if let Some(conf_thresh) = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-                                                       if let OnchainEvent::MaturingOutput { descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) } = &event.event {
-                                                               if descriptor.outpoint.index as u32 == htlc_commitment_tx_output_idx { Some(event.confirmation_threshold()) } else { None }
-                                                       } else { None }
-                                               }) {
-                                                       debug_assert!($holder_commitment);
-                                                       res.push(Balance::ClaimableAwaitingConfirmations {
-                                                               claimable_amount_satoshis: htlc.amount_msat / 1000,
-                                                               confirmation_height: conf_thresh,
-                                                       });
-                                               } else if us.htlcs_resolved_on_chain.iter().any(|v| v.commitment_tx_output_idx == htlc_commitment_tx_output_idx) {
-                                                       // Funding transaction spends should be fully confirmed by the time any
-                                                       // HTLC transactions are resolved, unless we're talking about a holder
-                                                       // commitment tx, whose resolution is delayed until the CSV timeout is
-                                                       // reached, even though HTLCs may be resolved after only
-                                                       // ANTI_REORG_DELAY confirmations.
-                                                       debug_assert!($holder_commitment || us.funding_spend_confirmed.is_some());
-                                               } else if htlc.offered == $holder_commitment {
-                                                       // If the payment was outbound, check if there's an HTLCUpdate
-                                                       // indicating we have spent this HTLC with a timeout, claiming it back
-                                                       // and awaiting confirmations on it.
-                                                       let htlc_update_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-                                                               if let OnchainEvent::HTLCUpdate { commitment_tx_output_idx: Some(commitment_tx_output_idx), .. } = event.event {
-                                                                       if commitment_tx_output_idx == htlc_commitment_tx_output_idx {
-                                                                               Some(event.confirmation_threshold()) } else { None }
-                                                               } else { None }
-                                                       });
-                                                       if let Some(conf_thresh) = htlc_update_pending {
-                                                               res.push(Balance::ClaimableAwaitingConfirmations {
-                                                                       claimable_amount_satoshis: htlc.amount_msat / 1000,
-                                                                       confirmation_height: conf_thresh,
-                                                               });
-                                                       } else {
-                                                               res.push(Balance::MaybeClaimableHTLCAwaitingTimeout {
-                                                                       claimable_amount_satoshis: htlc.amount_msat / 1000,
-                                                                       claimable_height: htlc.cltv_expiry,
-                                                               });
-                                                       }
-                                               } else if us.payment_preimages.get(&htlc.payment_hash).is_some() {
-                                                       // Otherwise (the payment was inbound), only expose it as claimable if
-                                                       // we know the preimage.
-                                                       // Note that if there is a pending claim, but it did not use the
-                                                       // preimage, we lost funds to our counterparty! We will then continue
-                                                       // to show it as ContentiousClaimable until ANTI_REORG_DELAY.
-                                                       let htlc_spend_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-                                                               if let OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } = event.event {
-                                                                       if commitment_tx_output_idx == htlc_commitment_tx_output_idx {
-                                                                               Some((event.confirmation_threshold(), preimage.is_some()))
-                                                                       } else { None }
-                                                               } else { None }
-                                                       });
-                                                       if let Some((conf_thresh, true)) = htlc_spend_pending {
-                                                               res.push(Balance::ClaimableAwaitingConfirmations {
-                                                                       claimable_amount_satoshis: htlc.amount_msat / 1000,
-                                                                       confirmation_height: conf_thresh,
-                                                               });
-                                                       } else {
-                                                               res.push(Balance::ContentiousClaimable {
-                                                                       claimable_amount_satoshis: htlc.amount_msat / 1000,
-                                                                       timeout_height: htlc.cltv_expiry,
-                                                               });
-                                                       }
+                                       if htlc.transaction_output_index.is_some() {
+
+                                               if let Some(bal) = us.get_htlc_balance(htlc, $holder_commitment, $counterparty_revoked_commitment, confirmed_txid) {
+                                                       res.push(bal);
                                                }
                                        }
                                }
@@ -1487,8 +1644,8 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
 
                if let Some(txid) = confirmed_txid {
                        let mut found_commitment_tx = false;
-                       if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
-                               walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().map(|(a, _)| a));
+                       if let Some(counterparty_tx_htlcs) = us.counterparty_claimable_outpoints.get(&txid) {
+                               // First look for the to_remote output back to us.
                                if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
                                        if let Some(value) = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
                                                if let OnchainEvent::MaturingOutput {
@@ -1507,9 +1664,50 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                                // confirmation with the same height or have never met our dust amount.
                                        }
                                }
+                               if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
+                                       walk_htlcs!(false, false, counterparty_tx_htlcs.iter().map(|(a, _)| a));
+                               } else {
+                                       walk_htlcs!(false, true, counterparty_tx_htlcs.iter().map(|(a, _)| a));
+                                       // The counterparty broadcasted a revoked state!
+                                       // Look for any StaticOutputs first, generating claimable balances for those.
+                                       // If any match the confirmed counterparty revoked to_self output, skip
+                                       // generating a CounterpartyRevokedOutputClaimable.
+                                       let mut spent_counterparty_output = false;
+                                       for event in us.onchain_events_awaiting_threshold_conf.iter() {
+                                               if let OnchainEvent::MaturingOutput {
+                                                       descriptor: SpendableOutputDescriptor::StaticOutput { output, .. }
+                                               } = &event.event {
+                                                       res.push(Balance::ClaimableAwaitingConfirmations {
+                                                               claimable_amount_satoshis: output.value,
+                                                               confirmation_height: event.confirmation_threshold(),
+                                                       });
+                                                       if let Some(confirmed_to_self_idx) = confirmed_counterparty_output.map(|(idx, _)| idx) {
+                                                               if event.transaction.as_ref().map(|tx|
+                                                                       tx.input.iter().any(|inp| inp.previous_output.vout == confirmed_to_self_idx)
+                                                               ).unwrap_or(false) {
+                                                                       spent_counterparty_output = true;
+                                                               }
+                                                       }
+                                               }
+                                       }
+
+                                       if spent_counterparty_output {
+                                       } else if let Some((confirmed_to_self_idx, amt)) = confirmed_counterparty_output {
+                                               let output_spendable = us.onchain_tx_handler
+                                                       .is_output_spend_pending(&BitcoinOutPoint::new(txid, confirmed_to_self_idx));
+                                               if output_spendable {
+                                                       res.push(Balance::CounterpartyRevokedOutputClaimable {
+                                                               claimable_amount_satoshis: amt,
+                                                       });
+                                               }
+                                       } else {
+                                               // Counterparty output is missing, either it was broadcasted on a
+                                               // previous version of LDK or the counterparty hadn't met dust.
+                                       }
+                               }
                                found_commitment_tx = true;
                        } else if txid == us.current_holder_commitment_tx.txid {
-                               walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().map(|(a, _, _)| a));
+                               walk_htlcs!(true, false, us.current_holder_commitment_tx.htlc_outputs.iter().map(|(a, _, _)| a));
                                if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
                                        res.push(Balance::ClaimableAwaitingConfirmations {
                                                claimable_amount_satoshis: us.current_holder_commitment_tx.to_self_value_sat,
@@ -1519,7 +1717,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                found_commitment_tx = true;
                        } else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx {
                                if txid == prev_commitment.txid {
-                                       walk_htlcs!(true, prev_commitment.htlc_outputs.iter().map(|(a, _, _)| a));
+                                       walk_htlcs!(true, false, prev_commitment.htlc_outputs.iter().map(|(a, _, _)| a));
                                        if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
                                                res.push(Balance::ClaimableAwaitingConfirmations {
                                                        claimable_amount_satoshis: prev_commitment.to_self_value_sat,
@@ -1540,19 +1738,24 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                        });
                                }
                        }
-                       // TODO: Add logic to provide claimable balances for counterparty broadcasting revoked
-                       // outputs.
                } else {
                        let mut claimable_inbound_htlc_value_sat = 0;
                        for (htlc, _, _) in us.current_holder_commitment_tx.htlc_outputs.iter() {
                                if htlc.transaction_output_index.is_none() { continue; }
                                if htlc.offered {
-                                       res.push(Balance::MaybeClaimableHTLCAwaitingTimeout {
+                                       res.push(Balance::MaybeTimeoutClaimableHTLC {
                                                claimable_amount_satoshis: htlc.amount_msat / 1000,
                                                claimable_height: htlc.cltv_expiry,
                                        });
                                } else if us.payment_preimages.get(&htlc.payment_hash).is_some() {
                                        claimable_inbound_htlc_value_sat += htlc.amount_msat / 1000;
+                               } else {
+                                       // As long as the HTLC is still in our latest commitment state, treat
+                                       // it as potentially claimable, even if it has long-since expired.
+                                       res.push(Balance::MaybePreimageClaimableHTLC {
+                                               claimable_amount_satoshis: htlc.amount_msat / 1000,
+                                               expiry_height: htlc.cltv_expiry,
+                                       });
                                }
                        }
                        res.push(Balance::ClaimableOnChannelClose {
@@ -1679,8 +1882,10 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
 /// as long as we examine both the current counterparty commitment transaction and, if it hasn't
 /// been revoked yet, the previous one, we we will never "forget" to resolve an HTLC.
 macro_rules! fail_unbroadcast_htlcs {
-       ($self: expr, $commitment_tx_type: expr, $commitment_txid_confirmed: expr,
+       ($self: expr, $commitment_tx_type: expr, $commitment_txid_confirmed: expr, $commitment_tx_confirmed: expr,
         $commitment_tx_conf_height: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
+               debug_assert_eq!($commitment_tx_confirmed.txid(), $commitment_txid_confirmed);
+
                macro_rules! check_htlc_fails {
                        ($txid: expr, $commitment_tx: expr) => {
                                if let Some(ref latest_outpoints) = $self.counterparty_claimable_outpoints.get($txid) {
@@ -1720,6 +1925,7 @@ macro_rules! fail_unbroadcast_htlcs {
                                                        });
                                                        let entry = OnchainEventEntry {
                                                                txid: $commitment_txid_confirmed,
+                                                               transaction: Some($commitment_tx_confirmed.clone()),
                                                                height: $commitment_tx_conf_height,
                                                                event: OnchainEvent::HTLCUpdate {
                                                                        source: (**source).clone(),
@@ -1752,12 +1958,12 @@ macro_rules! fail_unbroadcast_htlcs {
 
 #[cfg(test)]
 pub fn deliberately_bogus_accepted_htlc_witness_program() -> Vec<u8> {
-       let mut ret = [opcodes::all::OP_NOP.into_u8(); 136];
-       ret[131] = opcodes::all::OP_DROP.into_u8();
-       ret[132] = opcodes::all::OP_DROP.into_u8();
-       ret[133] = opcodes::all::OP_DROP.into_u8();
-       ret[134] = opcodes::all::OP_DROP.into_u8();
-       ret[135] = opcodes::OP_TRUE.into_u8();
+       let mut ret = [opcodes::all::OP_NOP.to_u8(); 136];
+       ret[131] = opcodes::all::OP_DROP.to_u8();
+       ret[132] = opcodes::all::OP_DROP.to_u8();
+       ret[133] = opcodes::all::OP_DROP.to_u8();
+       ret[134] = opcodes::all::OP_DROP.to_u8();
+       ret[135] = opcodes::OP_TRUE.to_u8();
        Vec::from(&ret[..])
 }
 
@@ -1907,7 +2113,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                // First check if a counterparty commitment transaction has been broadcasted:
                macro_rules! claim_htlcs {
                        ($commitment_number: expr, $txid: expr) => {
-                               let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs($commitment_number, $txid, None);
+                               let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None);
                                self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
                        }
                }
@@ -2086,13 +2292,18 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
        /// data in counterparty_claimable_outpoints. Will directly claim any HTLC outputs which expire at a
        /// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for
        /// HTLC-Success/HTLC-Timeout transactions.
-       /// Return updates for HTLC pending in the channel and failed automatically by the broadcast of
-       /// revoked counterparty commitment tx
-       fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<PackageTemplate>, TransactionOutputs) where L::Target: Logger {
+       ///
+       /// Returns packages to claim the revoked output(s), as well as additional outputs to watch and
+       /// general information about the output that is to the counterparty in the commitment
+       /// transaction.
+       fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L)
+               -> (Vec<PackageTemplate>, TransactionOutputs, CommitmentTxCounterpartyOutputInfo)
+       where L::Target: Logger {
                // Most secp and related errors trying to create keys means we have no hope of constructing
                // a spend transaction...so we return no transactions to broadcast
                let mut claimable_outpoints = Vec::new();
                let mut watch_outputs = Vec::new();
+               let mut to_counterparty_output_info = None;
 
                let commitment_txid = tx.txid(); //TODO: This is gonna be a performance bottleneck for watchtowers!
                let per_commitment_option = self.counterparty_claimable_outpoints.get(&commitment_txid);
@@ -2101,12 +2312,12 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                        ( $thing : expr ) => {
                                match $thing {
                                        Ok(a) => a,
-                                       Err(_) => return (claimable_outpoints, (commitment_txid, watch_outputs))
+                                       Err(_) => return (claimable_outpoints, (commitment_txid, watch_outputs), to_counterparty_output_info)
                                }
                        };
                }
 
-               let commitment_number = 0xffffffffffff - ((((tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (tx.lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
+               let commitment_number = 0xffffffffffff - ((((tx.input[0].sequence.0 as u64 & 0xffffff) << 3*8) | (tx.lock_time.0 as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
                if commitment_number >= self.get_min_seen_secret() {
                        let secret = self.get_secret(commitment_number).unwrap();
                        let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
@@ -2123,6 +2334,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv);
                                        let justice_package = PackageTemplate::build_package(commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, true, height);
                                        claimable_outpoints.push(justice_package);
+                                       to_counterparty_output_info =
+                                               Some((idx.try_into().expect("Txn can't have more than 2^32 outputs"), outp.value));
                                }
                        }
 
@@ -2132,7 +2345,9 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        if let Some(transaction_output_index) = htlc.transaction_output_index {
                                                if transaction_output_index as usize >= tx.output.len() ||
                                                                tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
-                                                       return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user
+                                                       // per_commitment_data is corrupt or our commitment signing key leaked!
+                                                       return (claimable_outpoints, (commitment_txid, watch_outputs),
+                                                               to_counterparty_output_info);
                                                }
                                                let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), self.onchain_tx_handler.channel_transaction_parameters.opt_anchors.is_some());
                                                let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, true, height);
@@ -2151,13 +2366,13 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
 
                                if let Some(per_commitment_data) = per_commitment_option {
-                                       fail_unbroadcast_htlcs!(self, "revoked_counterparty", commitment_txid, height,
+                                       fail_unbroadcast_htlcs!(self, "revoked_counterparty", commitment_txid, tx, height,
                                                per_commitment_data.iter().map(|(htlc, htlc_source)|
                                                        (htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
                                                ), logger);
                                } else {
                                        debug_assert!(false, "We should have per-commitment option for any recognized old commitment txn");
-                                       fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, height,
+                                       fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, tx, height,
                                                [].iter().map(|reference| *reference), logger);
                                }
                        }
@@ -2175,68 +2390,112 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                        self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
 
                        log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
-                       fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, height,
+                       fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, tx, height,
                                per_commitment_data.iter().map(|(htlc, htlc_source)|
                                        (htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
                                ), logger);
 
-                       let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs(commitment_number, commitment_txid, Some(tx));
+                       let (htlc_claim_reqs, counterparty_output_info) =
+                               self.get_counterparty_output_claim_info(commitment_number, commitment_txid, Some(tx));
+                       to_counterparty_output_info = counterparty_output_info;
                        for req in htlc_claim_reqs {
                                claimable_outpoints.push(req);
                        }
 
                }
-               (claimable_outpoints, (commitment_txid, watch_outputs))
+               (claimable_outpoints, (commitment_txid, watch_outputs), to_counterparty_output_info)
        }
 
-       fn get_counterparty_htlc_output_claim_reqs(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>) -> Vec<PackageTemplate> {
+       /// Returns the HTLC claim package templates and the counterparty output info
+       fn get_counterparty_output_claim_info(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>)
+       -> (Vec<PackageTemplate>, CommitmentTxCounterpartyOutputInfo) {
                let mut claimable_outpoints = Vec::new();
-               if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&commitment_txid) {
-                       if let Some(per_commitment_points) = self.their_cur_per_commitment_points {
-                               let per_commitment_point_option =
-                                       // If the counterparty commitment tx is the latest valid state, use their latest
-                                       // per-commitment point
-                                       if per_commitment_points.0 == commitment_number { Some(&per_commitment_points.1) }
-                                       else if let Some(point) = per_commitment_points.2.as_ref() {
-                                               // If counterparty commitment tx is the state previous to the latest valid state, use
-                                               // their previous per-commitment point (non-atomicity of revocation means it's valid for
-                                               // them to temporarily have two valid commitment txns from our viewpoint)
-                                               if per_commitment_points.0 == commitment_number + 1 { Some(point) } else { None }
-                                       } else { None };
-                               if let Some(per_commitment_point) = per_commitment_point_option {
-                                       for (_, &(ref htlc, _)) in htlc_outputs.iter().enumerate() {
-                                               if let Some(transaction_output_index) = htlc.transaction_output_index {
-                                                       if let Some(transaction) = tx {
-                                                               if transaction_output_index as usize >= transaction.output.len() ||
-                                                                       transaction.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
-                                                                               return claimable_outpoints; // Corrupted per_commitment_data, fuck this user
-                                                                       }
-                                                       }
-                                                       let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
-                                                       if preimage.is_some() || !htlc.offered {
-                                                               let counterparty_htlc_outp = if htlc.offered {
-                                                                       PackageSolvingData::CounterpartyOfferedHTLCOutput(
-                                                                               CounterpartyOfferedHTLCOutput::build(*per_commitment_point,
-                                                                                       self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
-                                                                                       self.counterparty_commitment_params.counterparty_htlc_base_key,
-                                                                                       preimage.unwrap(), htlc.clone()))
-                                                               } else {
-                                                                       PackageSolvingData::CounterpartyReceivedHTLCOutput(
-                                                                               CounterpartyReceivedHTLCOutput::build(*per_commitment_point,
-                                                                                       self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
-                                                                                       self.counterparty_commitment_params.counterparty_htlc_base_key,
-                                                                                       htlc.clone()))
-                                                               };
-                                                               let aggregation = if !htlc.offered { false } else { true };
-                                                               let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry,aggregation, 0);
-                                                               claimable_outpoints.push(counterparty_package);
-                                                       }
-                                               }
+               let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None;
+
+               let htlc_outputs = match self.counterparty_claimable_outpoints.get(&commitment_txid) {
+                       Some(outputs) => outputs,
+                       None => return (claimable_outpoints, to_counterparty_output_info),
+               };
+               let per_commitment_points = match self.their_cur_per_commitment_points {
+                       Some(points) => points,
+                       None => return (claimable_outpoints, to_counterparty_output_info),
+               };
+
+               let per_commitment_point =
+                       // If the counterparty commitment tx is the latest valid state, use their latest
+                       // per-commitment point
+                       if per_commitment_points.0 == commitment_number { &per_commitment_points.1 }
+                       else if let Some(point) = per_commitment_points.2.as_ref() {
+                               // If counterparty commitment tx is the state previous to the latest valid state, use
+                               // their previous per-commitment point (non-atomicity of revocation means it's valid for
+                               // them to temporarily have two valid commitment txns from our viewpoint)
+                               if per_commitment_points.0 == commitment_number + 1 {
+                                       point
+                               } else { return (claimable_outpoints, to_counterparty_output_info); }
+                       } else { return (claimable_outpoints, to_counterparty_output_info); };
+
+               if let Some(transaction) = tx {
+                       let revokeable_p2wsh_opt =
+                               if let Ok(revocation_pubkey) = chan_utils::derive_public_revocation_key(
+                                       &self.secp_ctx, &per_commitment_point, &self.holder_revocation_basepoint)
+                               {
+                                       if let Ok(delayed_key) = chan_utils::derive_public_key(&self.secp_ctx,
+                                               &per_commitment_point,
+                                               &self.counterparty_commitment_params.counterparty_delayed_payment_base_key)
+                                       {
+                                               Some(chan_utils::get_revokeable_redeemscript(&revocation_pubkey,
+                                                       self.counterparty_commitment_params.on_counterparty_tx_csv,
+                                                       &delayed_key).to_v0_p2wsh())
+                                       } else {
+                                               debug_assert!(false, "Failed to derive a delayed payment key for a commitment state we accepted");
+                                               None
                                        }
+                               } else {
+                                       debug_assert!(false, "Failed to derive a revocation pubkey key for a commitment state we accepted");
+                                       None
+                               };
+                       if let Some(revokeable_p2wsh) = revokeable_p2wsh_opt {
+                               for (idx, outp) in transaction.output.iter().enumerate() {
+                                       if outp.script_pubkey == revokeable_p2wsh {
+                                               to_counterparty_output_info =
+                                                       Some((idx.try_into().expect("Can't have > 2^32 outputs"), outp.value));
+                                       }
+                               }
+                       }
+               }
+
+               for (_, &(ref htlc, _)) in htlc_outputs.iter().enumerate() {
+                       if let Some(transaction_output_index) = htlc.transaction_output_index {
+                               if let Some(transaction) = tx {
+                                       if transaction_output_index as usize >= transaction.output.len() ||
+                                               transaction.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
+                                                       // per_commitment_data is corrupt or our commitment signing key leaked!
+                                                       return (claimable_outpoints, to_counterparty_output_info);
+                                               }
+                               }
+                               let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
+                               if preimage.is_some() || !htlc.offered {
+                                       let counterparty_htlc_outp = if htlc.offered {
+                                               PackageSolvingData::CounterpartyOfferedHTLCOutput(
+                                                       CounterpartyOfferedHTLCOutput::build(*per_commitment_point,
+                                                               self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+                                                               self.counterparty_commitment_params.counterparty_htlc_base_key,
+                                                               preimage.unwrap(), htlc.clone()))
+                                       } else {
+                                               PackageSolvingData::CounterpartyReceivedHTLCOutput(
+                                                       CounterpartyReceivedHTLCOutput::build(*per_commitment_point,
+                                                               self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+                                                               self.counterparty_commitment_params.counterparty_htlc_base_key,
+                                                               htlc.clone()))
+                                       };
+                                       let aggregation = if !htlc.offered { false } else { true };
+                                       let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry,aggregation, 0);
+                                       claimable_outpoints.push(counterparty_package);
                                }
                        }
                }
-               claimable_outpoints
+
+               (claimable_outpoints, to_counterparty_output_info)
        }
 
        /// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key
@@ -2334,7 +2593,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                        let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
                        let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
                        append_onchain_update!(res, to_watch);
-                       fail_unbroadcast_htlcs!(self, "latest holder", commitment_txid, height,
+                       fail_unbroadcast_htlcs!(self, "latest holder", commitment_txid, tx, height,
                                self.current_holder_commitment_tx.htlc_outputs.iter()
                                .map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())), logger);
                } else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
@@ -2344,7 +2603,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                let res = self.get_broadcasted_holder_claims(holder_tx, height);
                                let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
                                append_onchain_update!(res, to_watch);
-                               fail_unbroadcast_htlcs!(self, "previous holder", commitment_txid, height,
+                               fail_unbroadcast_htlcs!(self, "previous holder", commitment_txid, tx, height,
                                        holder_tx.htlc_outputs.iter().map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())),
                                        logger);
                        }
@@ -2491,14 +2750,19 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
                                                log_bytes!(self.funding_info.0.to_channel_id()), tx.txid());
                                        self.funding_spend_seen = true;
-                                       if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
-                                               let (mut new_outpoints, new_outputs) = self.check_spend_counterparty_transaction(&tx, height, &logger);
+                                       let mut commitment_tx_to_counterparty_output = None;
+                                       if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.0 >> 8*3) as u8 == 0x20 {
+                                               let (mut new_outpoints, new_outputs, counterparty_output_idx_sats) =
+                                                       self.check_spend_counterparty_transaction(&tx, height, &logger);
+                                               commitment_tx_to_counterparty_output = counterparty_output_idx_sats;
                                                if !new_outputs.1.is_empty() {
                                                        watch_outputs.push(new_outputs);
                                                }
                                                claimable_outpoints.append(&mut new_outpoints);
                                                if new_outpoints.is_empty() {
                                                        if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(&tx, height, &logger) {
+                                                               debug_assert!(commitment_tx_to_counterparty_output.is_none(),
+                                                                       "A commitment transaction matched as both a counterparty and local commitment tx?");
                                                                if !new_outputs.1.is_empty() {
                                                                        watch_outputs.push(new_outputs);
                                                                }
@@ -2510,9 +2774,11 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        let txid = tx.txid();
                                        self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
                                                txid,
+                                               transaction: Some((*tx).clone()),
                                                height: height,
                                                event: OnchainEvent::FundingSpendConfirmation {
                                                        on_local_output_csv: balance_spendable_csv,
+                                                       commitment_tx_to_counterparty_output,
                                                },
                                        });
                                } else {
@@ -2637,7 +2903,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                htlc_value_satoshis,
                                        }));
                                        if let Some(idx) = commitment_tx_output_idx {
-                                               self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { commitment_tx_output_idx: idx, payment_preimage: None });
+                                               self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
+                                                       commitment_tx_output_idx: idx, resolving_txid: Some(entry.txid),
+                                                       payment_preimage: None,
+                                               });
                                        }
                                },
                                OnchainEvent::MaturingOutput { descriptor } => {
@@ -2647,10 +2916,14 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        });
                                },
                                OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } => {
-                                       self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { commitment_tx_output_idx, payment_preimage: preimage });
+                                       self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
+                                               commitment_tx_output_idx, resolving_txid: Some(entry.txid),
+                                               payment_preimage: preimage,
+                                       });
                                },
-                               OnchainEvent::FundingSpendConfirmation { .. } => {
+                               OnchainEvent::FundingSpendConfirmation { commitment_tx_to_counterparty_output, .. } => {
                                        self.funding_spend_confirmed = Some(entry.txid);
+                                       self.confirmed_commitment_tx_counterparty_output = commitment_tx_to_counterparty_output;
                                },
                        }
                }
@@ -2879,7 +3152,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                log_error!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}!",
                                                        $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
                                                        if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
-                                                       if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back" });
+                                                       if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back. We can likely claim the HTLC output with a revocation claim" });
                                        } else {
                                                log_info!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}",
                                                        $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
@@ -2926,29 +3199,20 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        if payment_data.is_none() {
                                                                log_claim!($tx_info, $holder_tx, htlc_output, false);
                                                                let outbound_htlc = $holder_tx == htlc_output.offered;
-                                                               if !outbound_htlc || revocation_sig_claim {
-                                                                       self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
-                                                                               txid: tx.txid(), height,
-                                                                               event: OnchainEvent::HTLCSpendConfirmation {
-                                                                                       commitment_tx_output_idx: input.previous_output.vout,
-                                                                                       preimage: if accepted_preimage_claim || offered_preimage_claim {
-                                                                                               Some(payment_preimage) } else { None },
-                                                                                       // If this is a payment to us (!outbound_htlc, above),
-                                                                                       // wait for the CSV delay before dropping the HTLC from
-                                                                                       // claimable balance if the claim was an HTLC-Success
-                                                                                       // transaction.
-                                                                                       on_to_local_output_csv: if accepted_preimage_claim {
-                                                                                               Some(self.on_holder_tx_csv) } else { None },
-                                                                               },
-                                                                       });
-                                                               } else {
-                                                                       // Outbound claims should always have payment_data, unless
-                                                                       // we've already failed the HTLC as the commitment transaction
-                                                                       // which was broadcasted was revoked. In that case, we should
-                                                                       // spend the HTLC output here immediately, and expose that fact
-                                                                       // as a Balance, something which we do not yet do.
-                                                                       // TODO: Track the above as claimable!
-                                                               }
+                                                               self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
+                                                                       txid: tx.txid(), height, transaction: Some(tx.clone()),
+                                                                       event: OnchainEvent::HTLCSpendConfirmation {
+                                                                               commitment_tx_output_idx: input.previous_output.vout,
+                                                                               preimage: if accepted_preimage_claim || offered_preimage_claim {
+                                                                                       Some(payment_preimage) } else { None },
+                                                                               // If this is a payment to us (ie !outbound_htlc), wait for
+                                                                               // the CSV delay before dropping the HTLC from claimable
+                                                                               // balance if the claim was an HTLC-Success transaction (ie
+                                                                               // accepted_preimage_claim).
+                                                                               on_to_local_output_csv: if accepted_preimage_claim && !outbound_htlc {
+                                                                                       Some(self.on_holder_tx_csv) } else { None },
+                                                                       },
+                                                               });
                                                                continue 'outer_loop;
                                                        }
                                                }
@@ -2980,6 +3244,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
                                                        txid: tx.txid(),
                                                        height,
+                                                       transaction: Some(tx.clone()),
                                                        event: OnchainEvent::HTLCSpendConfirmation {
                                                                commitment_tx_output_idx: input.previous_output.vout,
                                                                preimage: Some(payment_preimage),
@@ -3000,6 +3265,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                } else { false }) {
                                                self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
                                                        txid: tx.txid(),
+                                                       transaction: Some(tx.clone()),
                                                        height,
                                                        event: OnchainEvent::HTLCSpendConfirmation {
                                                                commitment_tx_output_idx: input.previous_output.vout,
@@ -3026,6 +3292,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        });
                                        let entry = OnchainEventEntry {
                                                txid: tx.txid(),
+                                               transaction: Some(tx.clone()),
                                                height,
                                                event: OnchainEvent::HTLCUpdate {
                                                        source, payment_hash,
@@ -3099,6 +3366,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                if let Some(spendable_output) = spendable_output {
                        let entry = OnchainEventEntry {
                                txid: tx.txid(),
+                               transaction: Some(tx.clone()),
                                height: height,
                                event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() },
                        };
@@ -3359,12 +3627,14 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
                let mut htlcs_resolved_on_chain = Some(Vec::new());
                let mut funding_spend_seen = Some(false);
                let mut counterparty_node_id = None;
+               let mut confirmed_commitment_tx_counterparty_output = None;
                read_tlv_fields!(reader, {
                        (1, funding_spend_confirmed, option),
                        (3, htlcs_resolved_on_chain, vec_type),
                        (5, pending_monitor_events, vec_type),
                        (7, funding_spend_seen, option),
                        (9, counterparty_node_id, option),
+                       (11, confirmed_commitment_tx_counterparty_output, option),
                });
 
                let mut secp_ctx = Secp256k1::new();
@@ -3415,6 +3685,7 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
                        holder_tx_signed,
                        funding_spend_seen: funding_spend_seen.unwrap(),
                        funding_spend_confirmed,
+                       confirmed_commitment_tx_counterparty_output,
                        htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
 
                        best_block,
@@ -3465,7 +3736,7 @@ mod tests {
        use util::ser::{ReadableArgs, Writeable};
        use sync::{Arc, Mutex};
        use io;
-       use bitcoin::Witness;
+       use bitcoin::{PackedLockTime, Sequence, TxMerkleNode, Witness};
        use prelude::*;
 
        fn do_test_funding_spend_refuses_updates(use_local_txn: bool) {
@@ -3509,7 +3780,7 @@ mod tests {
                let new_header = BlockHeader {
                        version: 2, time: 0, bits: 0, nonce: 0,
                        prev_blockhash: nodes[0].best_block_info().0,
-                       merkle_root: Default::default() };
+                       merkle_root: TxMerkleNode::all_zeros() };
                let conf_height = nodes[0].best_block_info().1 + 1;
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
                        &[(0, broadcast_tx)], conf_height);
@@ -3569,7 +3840,7 @@ mod tests {
                let fee_estimator = TestFeeEstimator { sat_per_kw: Mutex::new(253) };
 
                let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
-               let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+               let dummy_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
 
                let mut preimages = Vec::new();
                {
@@ -3635,7 +3906,7 @@ mod tests {
                        delayed_payment_basepoint: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[47; 32]).unwrap()),
                        htlc_basepoint: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())
                };
-               let funding_outpoint = OutPoint { txid: Default::default(), index: u16::max_value() };
+               let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
                let channel_parameters = ChannelTransactionParameters {
                        holder_pubkeys: keys.holder_channel_pubkeys.clone(),
                        holder_selected_contest_delay: 66,
@@ -3749,7 +4020,7 @@ mod tests {
 
                // Justice tx with 1 to_holder, 2 revoked offered HTLCs, 1 revoked received HTLCs
                for &opt_anchors in [false, true].iter() {
-                       let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+                       let mut claim_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
                        let mut sum_actual_sigs = 0;
                        for i in 0..4 {
                                claim_tx.input.push(TxIn {
@@ -3758,7 +4029,7 @@ mod tests {
                                                vout: i,
                                        },
                                        script_sig: Script::new(),
-                                       sequence: 0xfffffffd,
+                                       sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
                                        witness: Witness::new(),
                                });
                        }
@@ -3781,7 +4052,7 @@ mod tests {
 
                // Claim tx with 1 offered HTLCs, 3 received HTLCs
                for &opt_anchors in [false, true].iter() {
-                       let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+                       let mut claim_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
                        let mut sum_actual_sigs = 0;
                        for i in 0..4 {
                                claim_tx.input.push(TxIn {
@@ -3790,7 +4061,7 @@ mod tests {
                                                vout: i,
                                        },
                                        script_sig: Script::new(),
-                                       sequence: 0xfffffffd,
+                                       sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
                                        witness: Witness::new(),
                                });
                        }
@@ -3813,7 +4084,7 @@ mod tests {
 
                // Justice tx with 1 revoked HTLC-Success tx output
                for &opt_anchors in [false, true].iter() {
-                       let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+                       let mut claim_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
                        let mut sum_actual_sigs = 0;
                        claim_tx.input.push(TxIn {
                                previous_output: BitcoinOutPoint {
@@ -3821,7 +4092,7 @@ mod tests {
                                        vout: 0,
                                },
                                script_sig: Script::new(),
-                               sequence: 0xfffffffd,
+                               sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
                                witness: Witness::new(),
                        });
                        claim_tx.output.push(TxOut {
index 4231d08259abcbe1e4c50550f4b40aa7d7064faa..73b8a1b98224ace7aef2f6db05a82ca2a020e476 100644 (file)
@@ -25,10 +25,11 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::WPubkeyHash;
 
-use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{SecretKey, PublicKey, Scalar};
 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Signing};
+use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::RecoverableSignature;
-use bitcoin::{secp256k1, Witness};
+use bitcoin::{PackedLockTime, secp256k1, Sequence, Witness};
 
 use util::{byte_utils, transaction_utils};
 use util::crypto::{hkdf_extract_expand_twice, sign};
@@ -404,6 +405,12 @@ pub trait KeysInterface {
        /// This method must return the same value each time it is called with a given `Recipient`
        /// parameter.
        fn get_node_secret(&self, recipient: Recipient) -> Result<SecretKey, ()>;
+       /// Gets the ECDH shared secret of our [`node secret`] and `other_key`, multiplying by `tweak` if
+       /// one is provided. Note that this tweak can be applied to `other_key` instead of our node
+       /// secret, though this is less efficient.
+       ///
+       /// [`node secret`]: Self::get_node_secret
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()>;
        /// Get a script pubkey which we send funds to when claiming on-chain contestable outputs.
        ///
        /// This method should return a different value each time it is called, to avoid linking
@@ -619,7 +626,7 @@ impl InMemorySigner {
                if spend_tx.input.len() <= input_idx { return Err(()); }
                if !spend_tx.input[input_idx].script_sig.is_empty() { return Err(()); }
                if spend_tx.input[input_idx].previous_output != descriptor.outpoint.into_bitcoin_outpoint() { return Err(()); }
-               if spend_tx.input[input_idx].sequence != descriptor.to_self_delay as u32 { return Err(()); }
+               if spend_tx.input[input_idx].sequence.0 != descriptor.to_self_delay as u32 { return Err(()); }
 
                let delayed_payment_key = chan_utils::derive_private_key(&secp_ctx, &descriptor.per_commitment_point, &self.delayed_payment_base_key)
                        .expect("We constructed the payment_base_key, so we can only fail here if the RNG is busted.");
@@ -1015,7 +1022,7 @@ impl KeysManager {
                                        input.push(TxIn {
                                                previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
                                                script_sig: Script::new(),
-                                               sequence: 0,
+                                               sequence: Sequence::ZERO,
                                                witness: Witness::new(),
                                        });
                                        witness_weight += StaticPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
@@ -1026,7 +1033,7 @@ impl KeysManager {
                                        input.push(TxIn {
                                                previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
                                                script_sig: Script::new(),
-                                               sequence: descriptor.to_self_delay as u32,
+                                               sequence: Sequence(descriptor.to_self_delay as u32),
                                                witness: Witness::new(),
                                        });
                                        witness_weight += DelayedPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
@@ -1037,7 +1044,7 @@ impl KeysManager {
                                        input.push(TxIn {
                                                previous_output: outpoint.into_bitcoin_outpoint(),
                                                script_sig: Script::new(),
-                                               sequence: 0,
+                                               sequence: Sequence::ZERO,
                                                witness: Witness::new(),
                                        });
                                        witness_weight += 1 + 73 + 34;
@@ -1049,7 +1056,7 @@ impl KeysManager {
                }
                let mut spend_tx = Transaction {
                        version: 2,
-                       lock_time: 0,
+                       lock_time: PackedLockTime(0),
                        input,
                        output: outputs,
                };
@@ -1133,6 +1140,14 @@ impl KeysInterface for KeysManager {
                }
        }
 
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+               let mut node_secret = self.get_node_secret(recipient)?;
+               if let Some(tweak) = tweak {
+                       node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
+               }
+               Ok(SharedSecret::new(other_key, &node_secret))
+       }
+
        fn get_inbound_payment_key_material(&self) -> KeyMaterial {
                self.inbound_payment_key.clone()
        }
@@ -1217,6 +1232,14 @@ impl KeysInterface for PhantomKeysManager {
                }
        }
 
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+               let mut node_secret = self.get_node_secret(recipient)?;
+               if let Some(tweak) = tweak {
+                       node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
+               }
+               Ok(SharedSecret::new(other_key, &node_secret))
+       }
+
        fn get_inbound_payment_key_material(&self) -> KeyMaterial {
                self.inbound_payment_key.clone()
        }
index a0eb17c6be0051eef2d902cd09f4af4ee1ff89a7..42508569575f988bf508ae69ba64ab40abb453e7 100644 (file)
@@ -12,7 +12,7 @@
 use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::script::Script;
-use bitcoin::blockdata::transaction::{Transaction, TxOut};
+use bitcoin::blockdata::transaction::TxOut;
 use bitcoin::hash_types::{BlockHash, Txid};
 use bitcoin::network::constants::Network;
 use bitcoin::secp256k1::PublicKey;
@@ -151,15 +151,15 @@ pub trait Confirm {
        /// in the event of a chain reorganization, it must not be called with a `header` that is no
        /// longer in the chain as of the last call to [`best_block_updated`].
        ///
-       /// [chain order]: Confirm#Order
+       /// [chain order]: Confirm#order
        /// [`best_block_updated`]: Self::best_block_updated
        fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);
 
        /// Processes a transaction that is no longer confirmed as result of a chain reorganization.
        ///
        /// Should be called for any transaction returned by [`get_relevant_txids`] if it has been
-       /// reorganized out of the best chain. Once called, the given transaction should not be returned
-       /// by [`get_relevant_txids`] unless it has been reconfirmed via [`transactions_confirmed`].
+       /// reorganized out of the best chain. Once called, the given transaction will not be returned
+       /// by [`get_relevant_txids`], unless it has been reconfirmed via [`transactions_confirmed`].
        ///
        /// [`get_relevant_txids`]: Self::get_relevant_txids
        /// [`transactions_confirmed`]: Self::transactions_confirmed
@@ -173,9 +173,9 @@ pub trait Confirm {
 
        /// Returns transactions that should be monitored for reorganization out of the chain.
        ///
-       /// Should include any transactions passed to [`transactions_confirmed`] that have insufficient
-       /// confirmations to be safe from a chain reorganization. Should not include any transactions
-       /// passed to [`transaction_unconfirmed`] unless later reconfirmed.
+       /// Will include any transactions passed to [`transactions_confirmed`] that have insufficient
+       /// confirmations to be safe from a chain reorganization. Will not include any transactions
+       /// passed to [`transaction_unconfirmed`], unless later reconfirmed.
        ///
        /// May be called to determine the subset of transactions that must still be monitored for
        /// reorganization. Will be idempotent between calls but may change as a result of calls to the
@@ -333,21 +333,18 @@ pub trait Filter {
 
        /// Registers interest in spends of a transaction output.
        ///
-       /// Optionally, when `output.block_hash` is set, should return any transaction spending the
-       /// output that is found in the corresponding block along with its index.
-       ///
-       /// This return value is useful for Electrum clients in order to supply in-block descendant
-       /// transactions which otherwise were not included. This is not necessary for other clients if
-       /// such descendant transactions were already included (e.g., when a BIP 157 client provides the
-       /// full block).
-       fn register_output(&self, output: WatchedOutput) -> Option<(usize, Transaction)>;
+       /// Note that this method might be called during processing of a new block. You therefore need
+       /// to ensure that dependent output spends within an already connected block are also correctly
+       /// handled, e.g., by re-scanning the block in question whenever new outputs have been
+       /// registered mid-processing.
+       fn register_output(&self, output: WatchedOutput);
 }
 
 /// A transaction output watched by a [`ChannelMonitor`] for spends on-chain.
 ///
 /// Used to convey to a [`Filter`] such an output with a given spending condition. Any transaction
 /// spending the output must be given to [`ChannelMonitor::block_connected`] either directly or via
-/// the return value of [`Filter::register_output`].
+/// [`Confirm::transactions_confirmed`].
 ///
 /// If `block_hash` is `Some`, this indicates the output was created in the corresponding block and
 /// may have been spent there. See [`Filter::register_output`] for details.
index ac01cacaa8649d6676050e3b6eb4d173026250ed..0f2edff5ed78bd3472e2f7c93ad8b2e845c62ba1 100644 (file)
@@ -38,6 +38,7 @@ use alloc::collections::BTreeMap;
 use core::cmp;
 use core::ops::Deref;
 use core::mem::replace;
+use bitcoin::hashes::Hash;
 
 const MAX_ALLOC_SIZE: usize = 64*1024;
 
@@ -92,7 +93,7 @@ impl Writeable for OnchainEventEntry {
 
 impl MaybeReadable for OnchainEventEntry {
        fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
-               let mut txid = Default::default();
+               let mut txid = Txid::all_zeros();
                let mut height = 0;
                let mut event = None;
                read_tlv_fields!(reader, {
@@ -389,7 +390,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                if cached_request.is_malleable() {
                        let predicted_weight = cached_request.package_weight(&self.destination_script, self.channel_transaction_parameters.opt_anchors.is_some());
                        if let Some((output_value, new_feerate)) =
-                                       cached_request.compute_package_output(predicted_weight, self.destination_script.dust_value().as_sat(), fee_estimator, logger) {
+                                       cached_request.compute_package_output(predicted_weight, self.destination_script.dust_value().to_sat(), fee_estimator, logger) {
                                assert!(new_feerate != 0);
 
                                let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap();
@@ -690,6 +691,10 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                }
        }
 
+       pub(crate) fn is_output_spend_pending(&self, outpoint: &BitcoinOutPoint) -> bool {
+               self.claimable_outpoints.get(outpoint).is_some()
+       }
+
        pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
                let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
                        .iter()
index 30530303e59b366239e88c96d846f36da77a49ee..c945d8909da4a61cc656a634a75f0b130eb342d2 100644 (file)
@@ -36,7 +36,7 @@ use prelude::*;
 use core::cmp;
 use core::mem;
 use core::ops::Deref;
-use bitcoin::Witness;
+use bitcoin::{PackedLockTime, Sequence, Witness};
 
 use super::chaininterface::LowerBoundedFeeEstimator;
 
@@ -393,7 +393,7 @@ impl PackageSolvingData {
                                if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
                                        let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, onchain_handler.opt_anchors(), &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
 
-                                       bumped_tx.lock_time = outp.htlc.cltv_expiry; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation
+                                       bumped_tx.lock_time = PackedLockTime(outp.htlc.cltv_expiry); // Right now we don't aggregate time-locked transactions; if we do, we should set lock_time beforehand to avoid breaking hash computation
                                        if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
                                                let mut ser_sig = sig.serialize_der().to_vec();
                                                ser_sig.push(EcdsaSighashType::All as u8);
@@ -615,7 +615,7 @@ impl PackageTemplate {
                        PackageMalleability::Malleable => {
                                let mut bumped_tx = Transaction {
                                        version: 2,
-                                       lock_time: 0,
+                                       lock_time: PackedLockTime::ZERO,
                                        input: vec![],
                                        output: vec![TxOut {
                                                script_pubkey: destination_script,
@@ -626,7 +626,7 @@ impl PackageTemplate {
                                        bumped_tx.input.push(TxIn {
                                                previous_output: *outpoint,
                                                script_sig: Script::new(),
-                                               sequence: 0xfffffffd,
+                                               sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
                                                witness: Witness::new(),
                                        });
                                }
index daec309e9b5e35f81684ef9cdc5ed1df2be9e55c..b7466776a6e70f0f7720fdcadd3ade48a945089c 100644 (file)
@@ -68,7 +68,8 @@ struct LockMetadata {
 
 struct LockDep {
        lock: Arc<LockMetadata>,
-       lockdep_trace: Backtrace,
+       /// lockdep_trace is unused unless we're building with `backtrace`, so we prefix it with `_`
+       _lockdep_trace: Backtrace,
 }
 
 #[cfg(feature = "backtrace")]
@@ -140,13 +141,13 @@ impl LockMetadata {
                                        // of the same lock.
                                        debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
                                }
-                               for (locked_dep_idx, locked_dep) in locked.locked_before.lock().unwrap().iter() {
+                               for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
                                        if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
                                                #[cfg(feature = "backtrace")]
                                                panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
                                                        get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
                                                        get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
-                                                       locked_dep.lockdep_trace);
+                                                       _locked_dep._lockdep_trace);
                                                #[cfg(not(feature = "backtrace"))]
                                                panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
                                        }
@@ -154,7 +155,7 @@ impl LockMetadata {
                                // Insert any already-held locks in our locked-before set.
                                let mut locked_before = this.locked_before.lock().unwrap();
                                if !locked_before.contains_key(&locked.lock_idx) {
-                                       let lockdep = LockDep { lock: Arc::clone(locked), lockdep_trace: Backtrace::new() };
+                                       let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
                                        locked_before.insert(lockdep.lock.lock_idx, lockdep);
                                }
                        }
@@ -175,7 +176,7 @@ impl LockMetadata {
                        let mut locked_before = this.locked_before.lock().unwrap();
                        for (locked_idx, locked) in held.borrow().iter() {
                                if !locked_before.contains_key(locked_idx) {
-                                       let lockdep = LockDep { lock: Arc::clone(locked), lockdep_trace: Backtrace::new() };
+                                       let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
                                        locked_before.insert(*locked_idx, lockdep);
                                }
                        }
index ba6d6bc71910e53ad37236dc30b6e19cd947a4d9..1000966d5a5b8ecca943fc7b7c2ef9642b027b9b 100644 (file)
@@ -17,7 +17,7 @@
 //! figure out how best to make networking happen/timers fire/things get written to disk/keys get
 //! generated/etc. This makes it a good candidate for tight integration into an existing wallet
 //! instead of having a rather-separate lightning appendage to a wallet.
-//! 
+//!
 //! `default` features are:
 //!
 //! * `std` - enables functionalities which require `std`, including `std::io` trait implementations and things which utilize time
 
 #![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), deny(missing_docs))]
 #![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), forbid(unsafe_code))]
+
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
 #![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
 
 // In general, rust is absolutely horrid at supporting users doing things like,
 // for example, compiling Rust code for real environments. Disable useless lints
@@ -76,6 +79,7 @@ pub mod util;
 pub mod chain;
 pub mod ln;
 pub mod routing;
+pub mod onion_message;
 
 #[cfg(feature = "std")]
 /// Re-export of either `core2::io` or `std::io`, depending on the `std` feature flag.
index 41d1eff856a71ed88cf8f43c14d8ffe234d27447..d53863289bc5807464739f0707fbb390c25f9869 100644 (file)
@@ -26,10 +26,10 @@ use util::ser::{Readable, Writeable, Writer};
 use util::{byte_utils, transaction_utils};
 
 use bitcoin::hash_types::WPubkeyHash;
-use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{SecretKey, PublicKey, Scalar};
 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Message};
 use bitcoin::secp256k1::Error as SecpError;
-use bitcoin::{secp256k1, Witness};
+use bitcoin::{PackedLockTime, secp256k1, Sequence, Witness};
 
 use io;
 use prelude::*;
@@ -101,7 +101,7 @@ pub fn build_closing_transaction(to_holder_value_sat: u64, to_counterparty_value
                ins.push(TxIn {
                        previous_output: funding_outpoint,
                        script_sig: Script::new(),
-                       sequence: 0xffffffff,
+                       sequence: Sequence::MAX,
                        witness: Witness::new(),
                });
                ins
@@ -132,7 +132,7 @@ pub fn build_closing_transaction(to_holder_value_sat: u64, to_counterparty_value
 
        Transaction {
                version: 2,
-               lock_time: 0,
+               lock_time: PackedLockTime::ZERO,
                input: txins,
                output: outputs,
        }
@@ -264,9 +264,7 @@ pub fn derive_private_key<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, per_co
        sha.input(&PublicKey::from_secret_key(&secp_ctx, &base_secret).serialize());
        let res = Sha256::from_engine(sha).into_inner();
 
-       let mut key = base_secret.clone();
-       key.add_assign(&res)?;
-       Ok(key)
+       base_secret.clone().add_tweak(&Scalar::from_be_bytes(res).unwrap())
 }
 
 /// Derives a per-commitment-transaction public key (eg an htlc key or a delayed_payment key)
@@ -313,12 +311,9 @@ pub fn derive_private_revocation_key<T: secp256k1::Signing>(secp_ctx: &Secp256k1
                Sha256::from_engine(sha).into_inner()
        };
 
-       let mut countersignatory_contrib = countersignatory_revocation_base_secret.clone();
-       countersignatory_contrib.mul_assign(&rev_append_commit_hash_key)?;
-       let mut broadcaster_contrib = per_commitment_secret.clone();
-       broadcaster_contrib.mul_assign(&commit_append_rev_hash_key)?;
-       countersignatory_contrib.add_assign(&broadcaster_contrib[..])?;
-       Ok(countersignatory_contrib)
+       let countersignatory_contrib = countersignatory_revocation_base_secret.clone().mul_tweak(&Scalar::from_be_bytes(rev_append_commit_hash_key).unwrap())?;
+       let broadcaster_contrib = per_commitment_secret.clone().mul_tweak(&Scalar::from_be_bytes(commit_append_rev_hash_key).unwrap())?;
+       countersignatory_contrib.add_tweak(&Scalar::from_be_bytes(broadcaster_contrib.secret_bytes()).unwrap())
 }
 
 /// Derives a per-commitment-transaction revocation public key from its constituent parts. This is
@@ -348,10 +343,8 @@ pub fn derive_public_revocation_key<T: secp256k1::Verification>(secp_ctx: &Secp2
                Sha256::from_engine(sha).into_inner()
        };
 
-       let mut countersignatory_contrib = countersignatory_revocation_base_point.clone();
-       countersignatory_contrib.mul_assign(&secp_ctx, &rev_append_commit_hash_key)?;
-       let mut broadcaster_contrib = per_commitment_point.clone();
-       broadcaster_contrib.mul_assign(&secp_ctx, &commit_append_rev_hash_key)?;
+       let countersignatory_contrib = countersignatory_revocation_base_point.clone().mul_tweak(&secp_ctx, &Scalar::from_be_bytes(rev_append_commit_hash_key).unwrap())?;
+       let broadcaster_contrib = per_commitment_point.clone().mul_tweak(&secp_ctx, &Scalar::from_be_bytes(commit_append_rev_hash_key).unwrap())?;
        countersignatory_contrib.combine(&broadcaster_contrib)
 }
 
@@ -614,7 +607,7 @@ pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, conte
                        vout: htlc.transaction_output_index.expect("Can't build an HTLC transaction for a dust output"),
                },
                script_sig: Script::new(),
-               sequence: if opt_anchors { 1 } else { 0 },
+               sequence: Sequence(if opt_anchors { 1 } else { 0 }),
                witness: Witness::new(),
        });
 
@@ -633,7 +626,7 @@ pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, conte
 
        Transaction {
                version: 2,
-               lock_time: if htlc.offered { htlc.cltv_expiry } else { 0 },
+               lock_time: PackedLockTime(if htlc.offered { htlc.cltv_expiry } else { 0 }),
                input: txins,
                output: txouts,
        }
@@ -863,7 +856,7 @@ impl HolderCommitmentTransaction {
                        holder_selected_contest_delay: 0,
                        is_outbound_from_holder: false,
                        counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: channel_pubkeys.clone(), selected_contest_delay: 0 }),
-                       funding_outpoint: Some(chain::transaction::OutPoint { txid: Default::default(), index: 0 }),
+                       funding_outpoint: Some(chain::transaction::OutPoint { txid: Txid::all_zeros(), index: 0 }),
                        opt_anchors: None
                };
                let mut htlcs_with_aux: Vec<(_, ())> = Vec::new();
@@ -1167,7 +1160,7 @@ impl CommitmentTransaction {
        fn make_transaction(obscured_commitment_transaction_number: u64, txins: Vec<TxIn>, outputs: Vec<TxOut>) -> Transaction {
                Transaction {
                        version: 2,
-                       lock_time: ((0x20 as u32) << 8 * 3) | ((obscured_commitment_transaction_number & 0xffffffu64) as u32),
+                       lock_time: PackedLockTime(((0x20 as u32) << 8 * 3) | ((obscured_commitment_transaction_number & 0xffffffu64) as u32)),
                        input: txins,
                        output: outputs,
                }
@@ -1291,8 +1284,8 @@ impl CommitmentTransaction {
                        ins.push(TxIn {
                                previous_output: channel_parameters.funding_outpoint(),
                                script_sig: Script::new(),
-                               sequence: ((0x80 as u32) << 8 * 3)
-                                       | ((obscured_commitment_transaction_number >> 3 * 8) as u32),
+                               sequence: Sequence(((0x80 as u32) << 8 * 3)
+                                       | ((obscured_commitment_transaction_number >> 3 * 8) as u32)),
                                witness: Witness::new(),
                        });
                        ins
@@ -1508,7 +1501,8 @@ mod tests {
        use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1};
        use util::test_utils;
        use chain::keysinterface::{KeysInterface, BaseSign};
-       use bitcoin::Network;
+       use bitcoin::{Network, Txid};
+       use bitcoin::hashes::Hash;
        use ln::PaymentHash;
        use bitcoin::hashes::hex::ToHex;
 
@@ -1533,7 +1527,7 @@ mod tests {
                        holder_selected_contest_delay: 0,
                        is_outbound_from_holder: false,
                        counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: counterparty_pubkeys.clone(), selected_contest_delay: 0 }),
-                       funding_outpoint: Some(chain::transaction::OutPoint { txid: Default::default(), index: 0 }),
+                       funding_outpoint: Some(chain::transaction::OutPoint { txid: Txid::all_zeros(), index: 0 }),
                        opt_anchors: None
                };
 
index 02ac7b48137fd6cad28c9e37d6b8a16421ea999f..f5977abd4fca7a4bae04814217348383f0c6b342 100644 (file)
@@ -36,6 +36,8 @@ use ln::functional_test_utils::*;
 use util::test_utils;
 
 use io;
+use bitcoin::hashes::Hash;
+use bitcoin::TxMerkleNode;
 use prelude::*;
 use sync::{Arc, Mutex};
 
@@ -116,7 +118,14 @@ fn test_monitor_and_persister_update_fail() {
                assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
                chain_mon
        };
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader {
+               version: 0x20000000,
+               prev_blockhash: BlockHash::all_zeros(),
+               merkle_root: TxMerkleNode::all_zeros(),
+               time: 42,
+               bits: 42,
+               nonce: 42
+       };
        chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
 
        // Set the persister's return value to be a TemporaryFailure.
index 6da5de04365ee4b1cc20f7501c62605a91d84643..624f4d6b688b0e2d81964d39e2deb77e1233af8a 100644 (file)
@@ -795,6 +795,9 @@ pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS
 /// See https://github.com/lightning/bolts/issues/905 for more details.
 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
 
+// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
+pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
+
 /// Used to return a simple Error back to ChannelManager. Will get converted to a
 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
 /// channel_id in ChannelManager.
@@ -843,16 +846,25 @@ impl<Signer: Sign> Channel<Signer> {
        }
 
        /// Returns a minimum channel reserve value the remote needs to maintain,
-       /// required by us.
+       /// required by us according to the configured or default
+       /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
        ///
        /// Guaranteed to return a value no larger than channel_value_satoshis
        ///
-       /// This is used both for new channels and to figure out what reserve value we sent to peers
-       /// for channels serialized before we included our selected reserve value in the serialized
-       /// data explicitly.
-       pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
+       /// This is used both for outbound and inbound channels and has lower bound
+       /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
+       pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
+               let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
+               cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
+       }
+
+       /// This is for legacy reasons, present for forward-compatibility.
+       /// LDK versions older than 0.0.104 don't know how read/handle values other than default
+       /// from storage. Hence, we use this function to not persist default values of
+       /// `holder_selected_channel_reserve_satoshis` for channels into storage.
+       pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
                let (q, _) = channel_value_satoshis.overflowing_div(100);
-               cmp::min(channel_value_satoshis, cmp::max(q, 1000)) //TODO
+               cmp::min(channel_value_satoshis, cmp::max(q, 1000))
        }
 
        pub(crate) fn opt_anchors(&self) -> bool {
@@ -912,8 +924,10 @@ impl<Signer: Sign> Channel<Signer> {
                if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
                        return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
                }
-               let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis);
+               let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
                if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       // Protocol level safety check in place, although it should never happen because
+                       // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
                        return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
                }
 
@@ -1204,12 +1218,14 @@ impl<Signer: Sign> Channel<Signer> {
                        }
                }
 
-               let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis);
+               let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
                if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       // Protocol level safety check in place, although it should never happen because
+                       // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
                        return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
                if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). Channel value is ({} - {}).", holder_selected_channel_reserve_satoshis, full_channel_value_msat, msg.push_msat)));
+                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
                }
                if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
@@ -6107,7 +6123,7 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
                // a different percentage of the channel value then 10%, which older versions of LDK used
                // to set it to before the percentage was made configurable.
                let serialized_holder_selected_reserve =
-                       if self.holder_selected_channel_reserve_satoshis != Self::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis)
+                       if self.holder_selected_channel_reserve_satoshis != Self::get_legacy_default_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis)
                        { Some(self.holder_selected_channel_reserve_satoshis) } else { None };
 
                let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
@@ -6382,7 +6398,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<Signer>
                let mut announcement_sigs = None;
                let mut target_closing_feerate_sats_per_kw = None;
                let mut monitor_pending_finalized_fulfills = Some(Vec::new());
-               let mut holder_selected_channel_reserve_satoshis = Some(Self::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
+               let mut holder_selected_channel_reserve_satoshis = Some(Self::get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
                let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
                // Prior to supporting channel type negotiation, all of our channels were static_remotekey
                // only, so we default to that if none was written.
@@ -6562,6 +6578,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<Signer>
 
 #[cfg(test)]
 mod tests {
+       use std::cmp;
        use bitcoin::blockdata::script::{Script, Builder};
        use bitcoin::blockdata::transaction::{Transaction, TxOut};
        use bitcoin::blockdata::constants::genesis_block;
@@ -6571,7 +6588,7 @@ mod tests {
        use ln::PaymentHash;
        use ln::channelmanager::{HTLCSource, PaymentId};
        use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
-       use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS};
+       use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
        use ln::features::{InitFeatures, ChannelTypeFeatures};
        use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
        use ln::script::ShutdownScript;
@@ -6586,14 +6603,16 @@ mod tests {
        use util::errors::APIError;
        use util::test_utils;
        use util::test_utils::OnGetShutdownScriptpubkey;
-       use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
+       use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Scalar};
        use bitcoin::secp256k1::ffi::Signature as FFISignature;
        use bitcoin::secp256k1::{SecretKey,PublicKey};
+       use bitcoin::secp256k1::ecdh::SharedSecret;
        use bitcoin::secp256k1::ecdsa::RecoverableSignature;
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::hashes::Hash;
        use bitcoin::hash_types::WPubkeyHash;
        use bitcoin::bech32::u5;
+       use bitcoin::PackedLockTime;
        use bitcoin::util::address::WitnessVersion;
        use prelude::*;
 
@@ -6629,6 +6648,7 @@ mod tests {
                type Signer = InMemorySigner;
 
                fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> { panic!(); }
+               fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&Scalar>) -> Result<SharedSecret, ()> { panic!(); }
                fn get_inbound_payment_key_material(&self) -> KeyMaterial { panic!(); }
                fn get_destination_script(&self) -> Script {
                        let secp_ctx = Secp256k1::signing_only();
@@ -6853,7 +6873,7 @@ mod tests {
 
                // Node A --> Node B: funding created
                let output_script = node_a_chan.get_funding_redeemscript();
-               let tx = Transaction { version: 1, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+               let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
@@ -6963,6 +6983,64 @@ mod tests {
                assert_eq!(chan_8.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
        }
 
+       #[test]
+       fn test_configured_holder_selected_channel_reserve_satoshis() {
+
+               // Test that `new_outbound` and `new_from_req` create a channel with the correct
+               // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
+               test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
+
+               // Test with valid but unreasonably high channel reserves
+               // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
+               test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
+               test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
+
+               // Test with calculated channel reserve less than lower bound
+               // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
+               test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
+
+               // Test with invalid channel reserves since sum of both is greater than or equal
+               // to channel value
+               test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
+               test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
+       }
+
+       fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
+               let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
+               let logger = test_utils::TestLogger::new();
+               let secp_ctx = Secp256k1::new();
+               let seed = [42; 32];
+               let network = Network::Testnet;
+               let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+               let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+
+
+               let mut outbound_node_config = UserConfig::default();
+               outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
+               let chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, outbound_node_id, &InitFeatures::known(), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
+
+               let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
+               assert_eq!(chan.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
+
+               let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
+               let mut inbound_node_config = UserConfig::default();
+               inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
+
+               if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
+                       let chan_inbound_node = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
+
+                       let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
+
+                       assert_eq!(chan_inbound_node.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
+                       assert_eq!(chan_inbound_node.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
+               } else {
+                       // Channel Negotiations failed
+                       let result = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
+                       assert!(result.is_err());
+               }
+       }
+
        #[test]
        fn channel_update() {
                let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
index d011d6b42c8621314dfd44fe49709bac702b6620..e8830ab5ffebef46dfab26d0cc427277e7a56ead 100644 (file)
@@ -32,7 +32,7 @@ use bitcoin::hash_types::{BlockHash, Txid};
 use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdh::SharedSecret;
-use bitcoin::secp256k1;
+use bitcoin::{LockTime, secp256k1, Sequence};
 
 use chain;
 use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
@@ -1635,7 +1635,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       /// Gets the current configuration applied to all new channels,  as
+       /// Gets the current configuration applied to all new channels.
        pub fn get_current_default_configuration(&self) -> &UserConfig {
                &self.default_configuration
        }
@@ -2144,17 +2144,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                })
        }
 
-       fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder<Signer>>) {
+       fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
                macro_rules! return_malformed_err {
                        ($msg: expr, $err_code: expr) => {
                                {
                                        log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                       return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
                                                sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
                                                failure_code: $err_code,
-                                       })), self.channel_state.lock().unwrap());
+                                       }));
                                }
                        }
                }
@@ -2174,25 +2174,20 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        //node knows the HMAC matched, so they already know what is there...
                        return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
                }
-
-               let mut channel_state = None;
                macro_rules! return_err {
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
                                        log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                       if channel_state.is_none() {
-                                               channel_state = Some(self.channel_state.lock().unwrap());
-                                       }
-                                       return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
                                                reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
-                                       })), channel_state.unwrap());
+                                       }));
                                }
                        }
                }
 
-               let next_hop = match onion_utils::decode_next_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
+               let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
                        Ok(res) => res,
                        Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                return_malformed_err!(err_msg, err_code);
@@ -2246,14 +2241,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                };
 
-               channel_state = Some(self.channel_state.lock().unwrap());
                if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
                        // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
                        // with a short_channel_id of 0. This is important as various things later assume
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
-                               let id_option = channel_state.as_ref().unwrap().short_to_chan_info.get(&short_channel_id).cloned();
                                if let Some((err, code, chan_update)) = loop {
+                                       let mut channel_state = self.channel_state.lock().unwrap();
+                                       let id_option = channel_state.short_to_chan_info.get(&short_channel_id).cloned();
                                        let forwarding_id_opt = match id_option {
                                                None => { // unknown_next_peer
                                                        // Note that this is likely a timing oracle for detecting whether an scid is a
@@ -2267,7 +2262,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                Some((_cp_id, chan_id)) => Some(chan_id.clone()),
                                        };
                                        let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt {
-                                               let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+                                               let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap();
                                                if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
                                                        // Note that the behavior here should be identical to the above block - we
                                                        // should NOT reveal the existence or non-existence of a private channel if
@@ -2353,7 +2348,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                }
 
-               (pending_forward_info, channel_state.unwrap())
+               pending_forward_info
        }
 
        /// Gets the current channel_update for the given channel. This first checks if the channel is
@@ -2902,7 +2897,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        // constituting our Lightning node might not have perfect sync about their blockchain views. Thus, if
                        // the wallet module is in advance on the LDK view, allow one more block of headroom.
                        // TODO: updated if/when https://github.com/rust-bitcoin/rust-bitcoin/pull/994 landed and rust-bitcoin bumped.
-                       if !funding_transaction.input.iter().all(|input| input.sequence == 0xffffffff) && funding_transaction.lock_time < 500_000_000 && funding_transaction.lock_time > height + 2 {
+                       if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 {
                                return Err(APIError::APIMisuseError {
                                        err: "Funding transaction absolute timelock is non-final".to_owned()
                                });
@@ -3153,7 +3148,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
                                                                                                if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
                                                                                                        let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
-                                                                                                       let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+                                                                                                       let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
                                                                                                                Ok(res) => res,
                                                                                                                Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                                                                                                        let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
@@ -4850,7 +4845,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
                //but we should prevent it anyway.
 
-               let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
+               let pending_forward_info = self.decode_update_add_htlc_onion(msg);
+               let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
 
                match channel_state.by_id.entry(msg.channel_id) {
@@ -7978,7 +7974,7 @@ pub mod bench {
 
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
-       use bitcoin::{Block, BlockHeader, Transaction, TxOut};
+       use bitcoin::{Block, BlockHeader, PackedLockTime, Transaction, TxMerkleNode, TxOut};
 
        use sync::{Arc, Mutex};
 
@@ -8040,7 +8036,7 @@ pub mod bench {
 
                let tx;
                if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
-                       tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+                       tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
                                value: 8_000_000, script_pubkey: output_script,
                        }]};
                        node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
@@ -8052,7 +8048,7 @@ pub mod bench {
                assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
 
                let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                        txdata: vec![tx],
                };
                Listen::block_connected(&node_a, &block, 1);
index dbc17a34138b04b59ca9207b574494c9a8485009..c978c61a3a563428e38f3368e557e544a6c623b9 100644 (file)
@@ -433,6 +433,11 @@ mod sealed {
        define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext],
                "Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional,
                set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit);
+       // We do not yet advertise the onion messages feature bit, but we need to detect when peers
+       // support it.
+       define_feature!(39, OnionMessages, [InitContext, NodeContext],
+               "Feature flags for `option_onion_messages`.", set_onion_messages_optional,
+               set_onion_messages_required, supports_onion_messages, requires_onion_messages);
        define_feature!(45, ChannelType, [InitContext, NodeContext],
                "Feature flags for `option_channel_type`.", set_channel_type_optional,
                set_channel_type_required, supports_channel_type, requires_channel_type);
@@ -767,7 +772,7 @@ impl<T: sealed::GossipQueries> Features<T> {
 
 impl<T: sealed::InitialRoutingSync> Features<T> {
        // We are no longer setting initial_routing_sync now that gossip_queries
-       // is enabled. This feature is ignored by a peer when gossip_queries has 
+       // is enabled. This feature is ignored by a peer when gossip_queries has
        // been negotiated.
        #[cfg(test)]
        pub(crate) fn clear_initial_routing_sync(&mut self) {
index 54d199a26f83f85cec4567baa1b1fae45d9e6faa..1f05cf9ac85f191788e7db734784ca5077f1aaf9 100644 (file)
@@ -47,6 +47,7 @@ use alloc::rc::Rc;
 use sync::{Arc, Mutex};
 use core::mem;
 use core::iter::repeat;
+use bitcoin::{PackedLockTime, TxMerkleNode};
 
 pub const CHAN_CONFIRM_DEPTH: u32 = 10;
 
@@ -77,11 +78,11 @@ pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &T
                connect_blocks(node, conf_height - first_connect_height);
        }
        let mut block = Block {
-               header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: conf_height, bits: 42, nonce: 42 },
+               header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: conf_height, bits: 42, nonce: 42 },
                txdata: Vec::new(),
        };
        for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
-               block.txdata.push(Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() });
+               block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
        }
        block.txdata.push(tx.clone());
        connect_block(node, &block);
@@ -148,7 +149,7 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) ->
 
        let height = node.best_block_info().1 + 1;
        let mut block = Block {
-               header: BlockHeader { version: 0x2000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: height, bits: 42, nonce: 42 },
+               header: BlockHeader { version: 0x2000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
                txdata: vec![],
        };
        assert!(depth >= 1);
@@ -156,7 +157,7 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) ->
                let prev_blockhash = block.header.block_hash();
                do_connect_block(node, block, skip_intermediaries);
                block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height + i, bits: 42, nonce: 42 },
+                       header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height + i, bits: 42, nonce: 42 },
                        txdata: vec![],
                };
        }
@@ -318,20 +319,20 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                );
                                let mut chan_progress = 0;
                                loop {
-                                       let orig_announcements = self.gossip_sync.get_next_channel_announcements(chan_progress, 255);
-                                       let deserialized_announcements = gossip_sync.get_next_channel_announcements(chan_progress, 255);
+                                       let orig_announcements = self.gossip_sync.get_next_channel_announcement(chan_progress);
+                                       let deserialized_announcements = gossip_sync.get_next_channel_announcement(chan_progress);
                                        assert!(orig_announcements == deserialized_announcements);
-                                       chan_progress = match orig_announcements.last() {
+                                       chan_progress = match orig_announcements {
                                                Some(announcement) => announcement.0.contents.short_channel_id + 1,
                                                None => break,
                                        };
                                }
                                let mut node_progress = None;
                                loop {
-                                       let orig_announcements = self.gossip_sync.get_next_node_announcements(node_progress.as_ref(), 255);
-                                       let deserialized_announcements = gossip_sync.get_next_node_announcements(node_progress.as_ref(), 255);
+                                       let orig_announcements = self.gossip_sync.get_next_node_announcement(node_progress.as_ref());
+                                       let deserialized_announcements = gossip_sync.get_next_node_announcement(node_progress.as_ref());
                                        assert!(orig_announcements == deserialized_announcements);
-                                       node_progress = match orig_announcements.last() {
+                                       node_progress = match orig_announcements {
                                                Some(announcement) => Some(announcement.contents.node_id),
                                                None => break,
                                        };
@@ -619,7 +620,7 @@ pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_
                        assert_eq!(*channel_value_satoshis, expected_chan_value);
                        assert_eq!(user_channel_id, expected_user_chan_id);
 
-                       let tx = Transaction { version: chan_id as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+                       let tx = Transaction { version: chan_id as i32, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
                                value: *channel_value_satoshis, script_pubkey: output_script.clone(),
                        }]};
                        let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
@@ -893,11 +894,11 @@ macro_rules! check_spends {
                {
                        $(
                        for outp in $spends_txn.output.iter() {
-                               assert!(outp.value >= outp.script_pubkey.dust_value().as_sat(), "Input tx output didn't meet dust limit");
+                               assert!(outp.value >= outp.script_pubkey.dust_value().to_sat(), "Input tx output didn't meet dust limit");
                        }
                        )*
                        for outp in $tx.output.iter() {
-                               assert!(outp.value >= outp.script_pubkey.dust_value().as_sat(), "Spending tx output didn't meet dust limit");
+                               assert!(outp.value >= outp.script_pubkey.dust_value().to_sat(), "Spending tx output didn't meet dust limit");
                        }
                        let get_output = |out_point: &bitcoin::blockdata::transaction::OutPoint| {
                                $(
@@ -1533,13 +1534,11 @@ macro_rules! expect_payment_failed {
        };
 }
 
-pub fn expect_payment_failed_conditions<'a, 'b, 'c, 'd, 'e>(
-       node: &'a Node<'b, 'c, 'd>, expected_payment_hash: PaymentHash, expected_rejected_by_dest: bool,
-       conditions: PaymentFailedConditions<'e>
+pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>(
+       node: &'a Node<'b, 'c, 'd>, payment_failed_event: Event, expected_payment_hash: PaymentHash,
+       expected_rejected_by_dest: bool, conditions: PaymentFailedConditions<'e>
 ) {
-       let mut events = node.node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 1);
-       let expected_payment_id = match events.pop().unwrap() {
+       let expected_payment_id = match payment_failed_event {
                Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update, short_channel_id,
                        #[cfg(test)]
                        error_code,
@@ -1602,6 +1601,15 @@ pub fn expect_payment_failed_conditions<'a, 'b, 'c, 'd, 'e>(
        }
 }
 
+pub fn expect_payment_failed_conditions<'a, 'b, 'c, 'd, 'e>(
+       node: &'a Node<'b, 'c, 'd>, expected_payment_hash: PaymentHash, expected_rejected_by_dest: bool,
+       conditions: PaymentFailedConditions<'e>
+) {
+       let mut events = node.node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       expect_payment_failed_conditions_event(node, events.pop().unwrap(), expected_payment_hash, expected_rejected_by_dest, conditions);
+}
+
 pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_paths: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) -> PaymentId {
        let payment_id = origin_node.node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
        check_added_monitors!(origin_node, expected_paths.len());
@@ -2125,9 +2133,9 @@ pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::Cha
                        if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
                                check_spends!(tx, res[0]);
                                if has_htlc_tx == HTLCType::TIMEOUT {
-                                       assert!(tx.lock_time != 0);
+                                       assert!(tx.lock_time.0 != 0);
                                } else {
-                                       assert!(tx.lock_time == 0);
+                                       assert!(tx.lock_time.0 == 0);
                                }
                                res.push(tx.clone());
                                false
index be517ea4a7698de94cd9fca02f7b57157a17e99f..c41d5402ff3861e2dafaff10da98d9d4b49c25d6 100644 (file)
@@ -42,7 +42,7 @@ use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
-use bitcoin::{Transaction, TxIn, TxOut, Witness};
+use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxMerkleNode, TxOut, Witness};
 use bitcoin::OutPoint as BitcoinOutPoint;
 
 use bitcoin::secp256k1::Secp256k1;
@@ -55,6 +55,7 @@ use prelude::*;
 use alloc::collections::BTreeSet;
 use core::default::Default;
 use core::iter::repeat;
+use bitcoin::hashes::Hash;
 use sync::{Arc, Mutex};
 
 use ln::functional_test_utils::*;
@@ -74,7 +75,7 @@ fn test_insane_channel_opens() {
        // Instantiate channel parameters where we push the maximum msats given our
        // funding satoshis
        let channel_value_sat = 31337; // same as funding satoshis
-       let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat);
+       let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
        let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
 
        // Have node0 initiate a channel to node1 with aforementioned parameters
@@ -149,13 +150,14 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let default_config = UserConfig::default();
 
        // Have node0 initiate a channel to node1 with aforementioned parameters
        let mut push_amt = 100_000_000;
        let feerate_per_kw = 253;
        let opt_anchors = false;
        push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
 
        let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
        let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -502,7 +504,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
 
        if steps & 0b1000_0000 != 0{
                let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                        txdata: vec![],
                };
                connect_block(&nodes[0], &block);
@@ -634,7 +636,8 @@ fn test_update_fee_that_funder_cannot_afford() {
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000, InitFeatures::known(), InitFeatures::known());
        let channel_id = chan.2;
        let secp_ctx = Secp256k1::new();
-       let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value);
+       let default_config = UserConfig::default();
+       let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
 
        let opt_anchors = false;
 
@@ -1498,12 +1501,13 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
+       let default_config = UserConfig::default();
        let opt_anchors = false;
 
        let mut push_amt = 100_000_000;
        push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+
+       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
 
        let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known());
 
@@ -1527,7 +1531,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
+       let default_config = UserConfig::default();
        let opt_anchors = false;
 
        // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
@@ -1535,7 +1539,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        // transaction fee with 0 HTLCs (183 sats)).
        let mut push_amt = 100_000_000;
        push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known());
 
        // Send four HTLCs to cover the initial push_msat buffer we're required to include
@@ -1580,7 +1584,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
+       let default_config = UserConfig::default();
        let opt_anchors = false;
 
        // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
@@ -1588,7 +1592,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        // transaction fee with 0 HTLCs (183 sats)).
        let mut push_amt = 100_000_000;
        push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known());
 
        let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
@@ -1618,7 +1622,7 @@ fn test_chan_init_feerate_unaffordability() {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
+       let default_config = UserConfig::default();
        let opt_anchors = false;
 
        // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
@@ -1630,7 +1634,7 @@ fn test_chan_init_feerate_unaffordability() {
 
        // During open, we don't have a "counterparty channel reserve" to check against, so that
        // requirement only comes into play on the open_channel handling side.
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
        let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        open_channel_msg.push_msat += 1;
@@ -1749,10 +1753,11 @@ fn test_inbound_outbound_capacity_is_not_zero() {
        let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
        let channels0 = node_chanmgrs[0].list_channels();
        let channels1 = node_chanmgrs[1].list_channels();
+       let default_config = UserConfig::default();
        assert_eq!(channels0.len(), 1);
        assert_eq!(channels1.len(), 1);
 
-       let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100000);
+       let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
        assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
        assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
 
@@ -2713,11 +2718,11 @@ fn test_htlc_on_chain_success() {
        assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
        assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
        assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
-       assert_eq!(node_txn[0].lock_time, 0);
-       assert_eq!(node_txn[1].lock_time, 0);
+       assert_eq!(node_txn[0].lock_time.0, 0);
+       assert_eq!(node_txn[1].lock_time.0, 0);
 
        // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[1], &Block { header, txdata: node_txn});
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
        {
@@ -2787,8 +2792,8 @@ fn test_htlc_on_chain_success() {
                        // Node[0]: ChannelManager: 3 (commtiemtn tx, 2*HTLC-Timeout tx), ChannelMonitor: 2 HTLC-timeout
                        check_spends!(node_txn[1], $commitment_tx);
                        check_spends!(node_txn[2], $commitment_tx);
-                       assert_ne!(node_txn[1].lock_time, 0);
-                       assert_ne!(node_txn[2].lock_time, 0);
+                       assert_ne!(node_txn[1].lock_time.0, 0);
+                       assert_ne!(node_txn[2].lock_time.0, 0);
                        if $htlc_offered {
                                assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
                                assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
@@ -2837,7 +2842,7 @@ fn test_htlc_on_chain_success() {
        assert_eq!(commitment_spend.input.len(), 2);
        assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
        assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
-       assert_eq!(commitment_spend.lock_time, 0);
+       assert_eq!(commitment_spend.lock_time.0, 0);
        assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
        check_spends!(node_txn[3], chan_1.3);
        assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71);
@@ -2847,7 +2852,7 @@ fn test_htlc_on_chain_success() {
        // we already checked the same situation with A.
 
        // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[0], &Block { header, txdata: vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()] });
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
@@ -3404,7 +3409,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        assert_eq!(node_txn.len(), 3);
        assert_eq!(node_txn[0], node_txn[1]);
 
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
@@ -3487,7 +3492,7 @@ fn test_force_close_fail_back() {
        assert_eq!(node_txn.len(), 1);
        assert_eq!(node_txn[0].input.len(), 1);
        assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
-       assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
+       assert_eq!(node_txn[0].lock_time.0, 0); // Must be an HTLC-Success
        assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
 
        check_spends!(node_txn[0], tx);
@@ -4255,7 +4260,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
        };
 
        let mut block = Block {
-               header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                txdata: vec![],
        };
        connect_block(&nodes[0], &block);
@@ -4800,7 +4805,7 @@ fn test_claim_sizeable_push_msat() {
        assert_eq!(spend_txn.len(), 1);
        assert_eq!(spend_txn[0].input.len(), 1);
        check_spends!(spend_txn[0], node_txn[0]);
-       assert_eq!(spend_txn[0].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
+       assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
 }
 
 #[test]
@@ -5030,10 +5035,10 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        assert_eq!(revoked_htlc_txn[1].input.len(), 1);
        assert_eq!(revoked_htlc_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
        check_spends!(revoked_htlc_txn[1], revoked_local_txn[0]);
-       assert_ne!(revoked_htlc_txn[1].lock_time, 0); // HTLC-Timeout
+       assert_ne!(revoked_htlc_txn[1].lock_time.0, 0); // HTLC-Timeout
 
        // B will generate justice tx from A's revoked commitment/HTLC tx
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
@@ -5107,7 +5112,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
 
        // A will generate justice tx from B's revoked commitment/HTLC tx
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
@@ -5204,10 +5209,10 @@ fn test_onchain_to_onchain_claim() {
        assert_eq!(c_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
        assert_eq!(c_txn[2].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
        assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
-       assert_eq!(c_txn[0].lock_time, 0); // Success tx
+       assert_eq!(c_txn[0].lock_time.0, 0); // Success tx
 
        // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_events();
@@ -5265,7 +5270,7 @@ fn test_onchain_to_onchain_claim() {
        check_spends!(b_txn[0], commitment_tx[0]);
        assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
        assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
-       assert_eq!(b_txn[0].lock_time, 0); // Success tx
+       assert_eq!(b_txn[0].lock_time.0, 0); // Success tx
 
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
@@ -5473,7 +5478,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        assert_eq!(spend_txn.len(), 1);
        assert_eq!(spend_txn[0].input.len(), 1);
        check_spends!(spend_txn[0], node_tx);
-       assert_eq!(spend_txn[0].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
+       assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
 }
 
 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
@@ -5821,11 +5826,11 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        check_spends!(spend_txn[0], local_txn[0]);
        assert_eq!(spend_txn[1].input.len(), 1);
        check_spends!(spend_txn[1], htlc_timeout);
-       assert_eq!(spend_txn[1].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
+       assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
        assert_eq!(spend_txn[2].input.len(), 2);
        check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
-       assert!(spend_txn[2].input[0].sequence == BREAKDOWN_TIMEOUT as u32 ||
-               spend_txn[2].input[1].sequence == BREAKDOWN_TIMEOUT as u32);
+       assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
+               spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
 }
 
 #[test]
@@ -5904,11 +5909,11 @@ fn test_key_derivation_params() {
        check_spends!(spend_txn[0], local_txn_1[0]);
        assert_eq!(spend_txn[1].input.len(), 1);
        check_spends!(spend_txn[1], htlc_timeout);
-       assert_eq!(spend_txn[1].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
+       assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
        assert_eq!(spend_txn[2].input.len(), 2);
        check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
-       assert!(spend_txn[2].input[0].sequence == BREAKDOWN_TIMEOUT as u32 ||
-               spend_txn[2].input[1].sequence == BREAKDOWN_TIMEOUT as u32);
+       assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
+               spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
 }
 
 #[test]
@@ -5967,7 +5972,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 
        let starting_block = nodes[1].best_block_info();
        let mut block = Block {
-               header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                txdata: vec![],
        };
        for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
@@ -5998,7 +6003,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        // to "time out" the HTLC.
 
        let starting_block = nodes[1].best_block_info();
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
 
        for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
                connect_block(&nodes[0], &Block { header, txdata: Vec::new()});
@@ -6045,7 +6050,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
 
        let starting_block = nodes[1].best_block_info();
        let mut block = Block {
-               header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                txdata: vec![],
        };
        for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
@@ -7331,7 +7336,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                if !revoked {
                        assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
                } else {
-                       assert_eq!(timeout_tx[0].lock_time, 0);
+                       assert_eq!(timeout_tx[0].lock_time.0, 0);
                }
                // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
                mine_transaction(&nodes[0], &timeout_tx[0]);
@@ -7751,7 +7756,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 
        // Actually revoke tx by claiming a HTLC
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] });
        check_added_monitors!(nodes[1], 1);
 
@@ -7851,7 +7856,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        // Revoke local commitment tx
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
 
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
        check_closed_broadcast!(nodes[1], true);
@@ -7874,9 +7879,9 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 
        // Broadcast set of revoked txn on A
        let hash_128 = connect_blocks(&nodes[0], 40);
-       let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
-       let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
        let events = nodes[0].node.get_and_clear_pending_events();
        expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
@@ -7933,9 +7938,9 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        }
 
        // Connect one more block to see if bumped penalty are issued for HTLC txn
-       let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
-       let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
        {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -7974,7 +7979,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
                txn
        };
        // Broadcast claim txn and confirm blocks to avoid further bumps on this outputs
-       let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn });
        connect_blocks(&nodes[0], 20);
        {
@@ -8189,7 +8194,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
                node_txn.clear();
                penalty_txn
        };
-       let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        {
@@ -8665,7 +8670,7 @@ fn test_secret_timeout() {
                        header: BlockHeader {
                                version: 0x2000000,
                                prev_blockhash: node_1_blocks.last().unwrap().0.block_hash(),
-                               merkle_root: Default::default(),
+                               merkle_root: TxMerkleNode::all_zeros(),
                                time: node_1_blocks.len() as u32 + 7200, bits: 42, nonce: 42 },
                        txdata: vec![],
                }
@@ -8810,7 +8815,7 @@ fn test_update_err_monitor_lockdown() {
                assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
                watchtower
        };
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        let block = Block { header, txdata: vec![] };
        // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
        // transaction lock time requirements here.
@@ -8874,7 +8879,7 @@ fn test_concurrent_monitor_claim() {
                assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
                watchtower
        };
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        let block = Block { header, txdata: vec![] };
        // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
        // transaction lock time requirements here.
@@ -8903,7 +8908,7 @@ fn test_concurrent_monitor_claim() {
                assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
                watchtower
        };
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 
        // Route another payment to generate another update with still previous HTLC pending
@@ -8928,7 +8933,7 @@ fn test_concurrent_monitor_claim() {
        check_added_monitors!(nodes[0], 1);
 
        //// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 
        // Watchtower Bob should have broadcast a commitment/HTLC-timeout
@@ -8941,7 +8946,7 @@ fn test_concurrent_monitor_claim() {
        };
 
        // We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, CHAN_CONFIRM_DEPTH + 2 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
        {
                let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -9016,7 +9021,7 @@ fn test_htlc_no_detection() {
        check_spends!(local_txn[0], chan_1.3);
 
        // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] });
        // We deliberately connect the local tx twice as this should provoke a failure calling
        // this test before #653 fix.
@@ -9034,7 +9039,7 @@ fn test_htlc_no_detection() {
                node_txn[1].clone()
        };
 
-       let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        expect_payment_failed!(nodes[0], our_payment_hash, true);
@@ -9095,7 +9100,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                        true => alice_txn.clone(),
                        false => get_local_commitment_txn!(nodes[1], chan_ab.2)
                };
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
                connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
                if broadcast_alice {
@@ -9178,7 +9183,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        let mut txn_to_broadcast = alice_txn.clone();
        if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
        if !go_onchain_before_fulfill {
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
                connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
                // If Bob was the one to force-close, he will have already passed these checks earlier.
                if broadcast_alice {
@@ -9510,14 +9515,14 @@ fn test_invalid_funding_tx() {
        // long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicing
        // as its not 32 bytes long.
        let mut spend_tx = Transaction {
-               version: 2i32, lock_time: 0,
+               version: 2i32, lock_time: PackedLockTime::ZERO,
                input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
                        previous_output: BitcoinOutPoint {
                                txid: tx.txid(),
                                vout: idx as u32,
                        },
                        script_sig: Script::new(),
-                       sequence: 0xfffffffd,
+                       sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
                        witness: Witness::from_vec(channelmonitor::deliberately_bogus_accepted_htlc_witness())
                }).collect(),
                output: vec![TxOut {
@@ -10428,12 +10433,12 @@ fn test_non_final_funding_tx() {
 
        let chan_id = *nodes[0].network_chan_count.borrow();
        let events = nodes[0].node.get_and_clear_pending_events();
-       let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: 0x1, witness: Witness::from_vec(vec!(vec!(1))) };
+       let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: Sequence(1), witness: Witness::from_vec(vec!(vec!(1))) };
        assert_eq!(events.len(), 1);
        let mut tx = match events[0] {
                Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
                        // Timelock the transaction _beyond_ the best client height + 2.
-                       Transaction { version: chan_id as i32, lock_time: best_height + 3, input: vec![input], output: vec![TxOut {
+                       Transaction { version: chan_id as i32, lock_time: PackedLockTime(best_height + 3), input: vec![input], output: vec![TxOut {
                                value: *channel_value_satoshis, script_pubkey: output_script.clone(),
                        }]}
                },
@@ -10448,7 +10453,7 @@ fn test_non_final_funding_tx() {
        }
 
        // However, transaction should be accepted if it's in a +2 headroom from best block.
-       tx.lock_time -= 1;
+       tx.lock_time = PackedLockTime(tx.lock_time.0 - 1);
        assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
        get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 }
index 28c86b79598cf70572069ce55561766a522b3130..af654837861fc2eeac3d1f7795453b1a206cae46 100644 (file)
@@ -43,7 +43,7 @@ pub mod channel;
 #[cfg(not(fuzzing))]
 pub(crate) mod channel;
 
-mod onion_utils;
+pub(crate) mod onion_utils;
 pub mod wire;
 
 // Older rustc (which we support) refuses to let us call the get_payment_preimage_hash!() macro
index 4f36b9a88810aa5122c3347953028c6b31943a87..a2fccbbc3883d6bb1e4d7f97ff89021b979483be 100644 (file)
@@ -11,6 +11,7 @@
 
 use chain::channelmonitor::{ANTI_REORG_DELAY, Balance};
 use chain::transaction::OutPoint;
+use chain::chaininterface::LowerBoundedFeeEstimator;
 use ln::channel;
 use ln::channelmanager::BREAKDOWN_TIMEOUT;
 use ln::features::InitFeatures;
@@ -228,6 +229,17 @@ fn sorted_vec<T: Ord>(mut v: Vec<T>) -> Vec<T> {
        v
 }
 
+/// Asserts that `a` and `b` are close, but maybe off by up to 5.
+/// This is useful when checking fees and weights on transactions as things may vary by a few based
+/// on signature size and signature size estimation being non-exact.
+fn fuzzy_assert_eq<V: core::convert::TryInto<u64>>(a: V, b: V) {
+	let a_u64 = a.try_into().map_err(|_| ()).unwrap();
+	let b_u64 = b.try_into().map_err(|_| ()).unwrap();
+	eprintln!("Checking {} and {} for fuzzy equality", a_u64, b_u64);
+	// Use saturating_sub to avoid u64 underflow (debug-build panic) when the
+	// compared value is smaller than the 5-unit tolerance.
+	assert!(a_u64 >= b_u64.saturating_sub(5));
+	assert!(b_u64 >= a_u64.saturating_sub(5));
+}
+
 fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        // Tests `get_claimable_balances` with an HTLC across a force-close.
        // We build a channel with an HTLC pending, then force close the channel and check that the
@@ -268,18 +280,24 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
                        claimable_amount_satoshis: 1_000_000 - 3_000 - 4_000 - 1_000 - 3 - chan_feerate *
                                (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 3_000,
                        claimable_height: htlc_cltv_timeout,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 4_000,
                        claimable_height: htlc_cltv_timeout,
                }]),
                sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
-       assert_eq!(vec![Balance::ClaimableOnChannelClose {
+       assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
                        claimable_amount_satoshis: 1_000,
-               }],
-               nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
+               }, Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 3_000,
+                       expiry_height: htlc_cltv_timeout,
+               }, Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 4_000,
+                       expiry_height: htlc_cltv_timeout,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
 
        nodes[1].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[1], 1);
@@ -323,12 +341,12 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
                                chan_feerate * (channel::commitment_tx_base_weight(opt_anchors) +
                                                                if prev_commitment_tx { 1 } else { 2 } *
                                                                channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 4_000,
                        claimable_height: htlc_cltv_timeout,
                }];
        if !prev_commitment_tx {
-               a_expected_balances.push(Balance::MaybeClaimableHTLCAwaitingTimeout {
+               a_expected_balances.push(Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 3_000,
                        claimable_height: htlc_cltv_timeout,
                });
@@ -385,10 +403,10 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
                        claimable_amount_satoshis: 1_000_000 - 3_000 - 4_000 - 1_000 - 3 - chan_feerate *
                                (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
                        confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 3_000,
                        claimable_height: htlc_cltv_timeout,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 4_000,
                        claimable_height: htlc_cltv_timeout,
                }]),
@@ -416,10 +434,10 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
 
        // After ANTI_REORG_DELAY, A will consider its balance fully spendable and generate a
        // `SpendableOutputs` event. However, B still has to wait for the CSV delay.
-       assert_eq!(sorted_vec(vec![Balance::MaybeClaimableHTLCAwaitingTimeout {
+       assert_eq!(sorted_vec(vec![Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 3_000,
                        claimable_height: htlc_cltv_timeout,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 4_000,
                        claimable_height: htlc_cltv_timeout,
                }]),
@@ -447,16 +465,16 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        } else {
                expect_payment_sent!(nodes[0], payment_preimage);
        }
-       assert_eq!(sorted_vec(vec![Balance::MaybeClaimableHTLCAwaitingTimeout {
+       assert_eq!(sorted_vec(vec![Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 3_000,
                        claimable_height: htlc_cltv_timeout,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 4_000,
                        claimable_height: htlc_cltv_timeout,
                }]),
                sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-       assert_eq!(vec![Balance::MaybeClaimableHTLCAwaitingTimeout {
+       assert_eq!(vec![Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 4_000,
                        claimable_height: htlc_cltv_timeout,
                }],
@@ -628,10 +646,10 @@ fn test_balances_on_local_commitment_htlcs() {
                        claimable_amount_satoshis: 1_000_000 - 10_000 - 20_000 - chan_feerate *
                                (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
                        confirmation_height: node_a_commitment_claimable,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 10_000,
                        claimable_height: htlc_cltv_timeout,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 20_000,
                        claimable_height: htlc_cltv_timeout,
                }]),
@@ -655,15 +673,15 @@ fn test_balances_on_local_commitment_htlcs() {
                        claimable_amount_satoshis: 1_000_000 - 10_000 - 20_000 - chan_feerate *
                                (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
                        confirmation_height: node_a_commitment_claimable,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 10_000,
                        claimable_height: htlc_cltv_timeout,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 20_000,
                        claimable_height: htlc_cltv_timeout,
                }]),
                sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
-       assert_eq!(as_txn[1].lock_time, nodes[0].best_block_info().1 + 1); // as_txn[1] can be included in the next block
+       assert_eq!(as_txn[1].lock_time.0, nodes[0].best_block_info().1 + 1); // as_txn[1] can be included in the next block
 
        // Now confirm nodes[0]'s HTLC-Timeout transaction, which changes the claimable balance to an
        // "awaiting confirmations" one.
@@ -679,7 +697,7 @@ fn test_balances_on_local_commitment_htlcs() {
                }, Balance::ClaimableAwaitingConfirmations {
                        claimable_amount_satoshis: 10_000,
                        confirmation_height: node_a_htlc_claimable,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 20_000,
                        claimable_height: htlc_cltv_timeout,
                }]),
@@ -696,7 +714,7 @@ fn test_balances_on_local_commitment_htlcs() {
                }, Balance::ClaimableAwaitingConfirmations {
                        claimable_amount_satoshis: 10_000,
                        confirmation_height: node_a_htlc_claimable,
-               }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+               }, Balance::MaybeTimeoutClaimableHTLC {
                        claimable_amount_satoshis: 20_000,
                        claimable_height: htlc_cltv_timeout,
                }]),
@@ -734,3 +752,881 @@ fn test_balances_on_local_commitment_htlcs() {
        assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
        test_spendable_output(&nodes[0], &as_txn[1]);
 }
+
+#[test]
+fn test_no_preimage_inbound_htlc_balances() {
+       // Tests that MaybePreimageClaimableHTLC are generated for inbound HTLCs for which we do not
+       // have a preimage.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known());
+       let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+
+       // Send two HTLCs, one from A to B, and one from B to A.
+       let to_b_failed_payment_hash = route_payment(&nodes[0], &[&nodes[1]], 10_000_000).1;
+       let to_a_failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 20_000_000).1;
+       let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+
+       let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
+       let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+
+       // Both A and B will have an HTLC that's claimable on timeout and one that's claimable if they
+       // receive the preimage. These will remain the same through the channel closure and until the
+       // HTLC output is spent.
+
+       assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
+                       claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+               }, Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 20_000,
+                       expiry_height: htlc_cltv_timeout,
+               }, Balance::MaybeTimeoutClaimableHTLC {
+                       claimable_amount_satoshis: 10_000,
+                       claimable_height: htlc_cltv_timeout,
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
+                       claimable_amount_satoshis: 500_000 - 20_000,
+               }, Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 10_000,
+                       expiry_height: htlc_cltv_timeout,
+               }, Balance::MaybeTimeoutClaimableHTLC {
+                       claimable_amount_satoshis: 20_000,
+                       claimable_height: htlc_cltv_timeout,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       // Get nodes[0]'s commitment transaction and HTLC-Timeout transaction
+       let as_txn = get_local_commitment_txn!(nodes[0], chan_id);
+       assert_eq!(as_txn.len(), 2);
+       check_spends!(as_txn[1], as_txn[0]);
+       check_spends!(as_txn[0], funding_tx);
+
+       // Now close the channel by confirming A's commitment transaction on both nodes, checking the
+       // claimable balances remain the same except for the non-HTLC balance changing variant.
+       let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32;
+       let as_pre_spend_claims = sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+                       confirmation_height: node_a_commitment_claimable,
+               }, Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 20_000,
+                       expiry_height: htlc_cltv_timeout,
+               }, Balance::MaybeTimeoutClaimableHTLC {
+                       claimable_amount_satoshis: 10_000,
+                       claimable_height: htlc_cltv_timeout,
+               }]);
+
+       mine_transaction(&nodes[0], &as_txn[0]);
+       nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+       check_added_monitors!(nodes[0], 1);
+       check_closed_broadcast!(nodes[0], true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+
+       assert_eq!(as_pre_spend_claims,
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       mine_transaction(&nodes[1], &as_txn[0]);
+       check_added_monitors!(nodes[1], 1);
+       check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+
+       let node_b_commitment_claimable = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+       let mut bs_pre_spend_claims = sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 500_000 - 20_000,
+                       confirmation_height: node_b_commitment_claimable,
+               }, Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 10_000,
+                       expiry_height: htlc_cltv_timeout,
+               }, Balance::MaybeTimeoutClaimableHTLC {
+                       claimable_amount_satoshis: 20_000,
+                       claimable_height: htlc_cltv_timeout,
+               }]);
+       assert_eq!(bs_pre_spend_claims,
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       // We'll broadcast the HTLC-Timeout transaction one block prior to the htlc's expiration (as it
+       // is confirmable in the next block), but will still include the same claimable balances as no
+       // HTLC has been spent, even after the HTLC expires. We'll also fail the inbound HTLC, but it
+       // won't do anything as the channel is already closed.
+
+       connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
+       let as_htlc_timeout_claim = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(as_htlc_timeout_claim.len(), 1);
+       check_spends!(as_htlc_timeout_claim[0], as_txn[0]);
+       expect_pending_htlcs_forwardable_conditions!(nodes[0],
+               [HTLCDestination::FailedPayment { payment_hash: to_a_failed_payment_hash }]);
+
+       assert_eq!(as_pre_spend_claims,
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[0], 1);
+       assert_eq!(as_pre_spend_claims,
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       // For node B, we'll get the non-HTLC funds claimable after ANTI_REORG_DELAY confirmations
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+       test_spendable_output(&nodes[1], &as_txn[0]);
+       bs_pre_spend_claims.retain(|e| if let Balance::ClaimableAwaitingConfirmations { .. } = e { false } else { true });
+
+       // The next few blocks for B look the same as for A, though for the opposite HTLC
+       nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+       connect_blocks(&nodes[1], TEST_FINAL_CLTV - (ANTI_REORG_DELAY - 1) - 1);
+       expect_pending_htlcs_forwardable_conditions!(nodes[1],
+               [HTLCDestination::FailedPayment { payment_hash: to_b_failed_payment_hash }]);
+       let bs_htlc_timeout_claim = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(bs_htlc_timeout_claim.len(), 1);
+       check_spends!(bs_htlc_timeout_claim[0], as_txn[0]);
+
+       assert_eq!(bs_pre_spend_claims,
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[1], 1);
+       assert_eq!(bs_pre_spend_claims,
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       // Now confirm the two HTLC timeout transactions for A, checking that the inbound HTLC resolves
+       // after ANTI_REORG_DELAY confirmations and the other takes BREAKDOWN_TIMEOUT confirmations.
+       mine_transaction(&nodes[0], &as_htlc_timeout_claim[0]);
+       let as_timeout_claimable_height = nodes[0].best_block_info().1 + (BREAKDOWN_TIMEOUT as u32) - 1;
+       assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+                       confirmation_height: node_a_commitment_claimable,
+               }, Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 20_000,
+                       expiry_height: htlc_cltv_timeout,
+               }, Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 10_000,
+                       confirmation_height: as_timeout_claimable_height,
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       mine_transaction(&nodes[0], &bs_htlc_timeout_claim[0]);
+       assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+                       confirmation_height: node_a_commitment_claimable,
+               }, Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 20_000,
+                       expiry_height: htlc_cltv_timeout,
+               }, Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 10_000,
+                       confirmation_height: as_timeout_claimable_height,
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       // Once as_htlc_timeout_claim[0] reaches ANTI_REORG_DELAY confirmations, we should get a
+       // payment failure event.
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
+       expect_payment_failed!(nodes[0], to_b_failed_payment_hash, true);
+
+       connect_blocks(&nodes[0], 1);
+       assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+                       confirmation_height: node_a_commitment_claimable,
+               }, Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 10_000,
+                       confirmation_height: core::cmp::max(as_timeout_claimable_height, htlc_cltv_timeout),
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[0], node_a_commitment_claimable - nodes[0].best_block_info().1);
+       assert_eq!(vec![Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 10_000,
+                       confirmation_height: core::cmp::max(as_timeout_claimable_height, htlc_cltv_timeout),
+               }],
+               nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
+       test_spendable_output(&nodes[0], &as_txn[0]);
+
+       connect_blocks(&nodes[0], as_timeout_claimable_height - nodes[0].best_block_info().1);
+       assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
+       test_spendable_output(&nodes[0], &as_htlc_timeout_claim[0]);
+
+       // The process for B should be completely identical as well, noting that the non-HTLC-balance
+       // was already claimed.
+       mine_transaction(&nodes[1], &bs_htlc_timeout_claim[0]);
+       let bs_timeout_claimable_height = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+       assert_eq!(sorted_vec(vec![Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 10_000,
+                       expiry_height: htlc_cltv_timeout,
+               }, Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 20_000,
+                       confirmation_height: bs_timeout_claimable_height,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       mine_transaction(&nodes[1], &as_htlc_timeout_claim[0]);
+       assert_eq!(sorted_vec(vec![Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 10_000,
+                       expiry_height: htlc_cltv_timeout,
+               }, Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: 20_000,
+                       confirmation_height: bs_timeout_claimable_height,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+       expect_payment_failed!(nodes[1], to_a_failed_payment_hash, true);
+
+       assert_eq!(vec![Balance::MaybePreimageClaimableHTLC {
+                       claimable_amount_satoshis: 10_000,
+                       expiry_height: htlc_cltv_timeout,
+               }],
+               nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
+       test_spendable_output(&nodes[1], &bs_htlc_timeout_claim[0]);
+
+       connect_blocks(&nodes[1], 1);
+       assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
+}
+
+fn sorted_vec_with_additions<T: Ord + Clone>(v_orig: &Vec<T>, extra_ts: &[&T]) -> Vec<T> {
+       let mut v = v_orig.clone();
+       for t in extra_ts {
+               v.push((*t).clone());
+       }
+       v.sort_unstable();
+       v
+}
+
+fn do_test_revoked_counterparty_commitment_balances(confirm_htlc_spend_first: bool) {
+	// Tests `get_claimable_balances` for revoked counterparty commitment transactions.
+	let mut chanmon_cfgs = create_chanmon_cfgs(2);
+	// We broadcast a second-to-latest commitment transaction, without providing the revocation
+	// secret to the counterparty. However, because we always immediately take the revocation
+	// secret from the keys_manager, we would panic at broadcast as we're trying to sign a
+	// transaction which, from the point of view of our keys_manager, is revoked.
+	chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let (_, _, chan_id, funding_tx) =
+		create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000, InitFeatures::known(), InitFeatures::known());
+	let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+	assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+
+	// We create five HTLCs for B to claim against A's revoked commitment transaction:
+	//
+	// (1) one for which A is the originator and B knows the preimage
+	// (2) one for which B is the originator where the HTLC has since timed-out
+	// (3) one for which B is the originator but where the HTLC has not yet timed-out
+	// (4) one dust HTLC which is lost in the channel closure
+	// (5) one that actually isn't in the revoked commitment transaction at all, but was added in
+	//     later commitment transaction updates
+	//
+	// Though they could all be claimed in a single claim transaction, due to CLTV timeouts they
+	// are all currently claimed in separate transactions, which helps us test as we can claim
+	// HTLCs individually.
+
+	let (claimed_payment_preimage, claimed_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+	let timeout_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 4_000_000).1;
+	let dust_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 3_000).1;
+
+	let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+
+	// Advance ten blocks so the "live" HTLC routed below gets a later CLTV expiry than the
+	// HTLCs routed above.
+	connect_blocks(&nodes[0], 10);
+	connect_blocks(&nodes[1], 10);
+
+	let live_htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+	let live_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 5_000_000).1;
+
+	// Get the latest commitment transaction from A and then update the fee to revoke it
+	let as_revoked_txn = get_local_commitment_txn!(nodes[0], chan_id);
+	let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+
+	let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
+
+	let missing_htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+	let missing_htlc_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 2_000_000).1;
+
+	// B claims HTLC (1) off-chain; the resulting commitment-update messages are fetched but
+	// deliberately never delivered to A, so the claim only exists in later commitment states.
+	nodes[1].node.claim_funds(claimed_payment_preimage);
+	expect_payment_claimed!(nodes[1], claimed_payment_hash, 3_000_000);
+	check_added_monitors!(nodes[1], 1);
+	let _b_htlc_msgs = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
+
+	// Connect blocks until the first set of HTLCs expire, at which point A closes the channel.
+	connect_blocks(&nodes[0], htlc_cltv_timeout + 1 - 10);
+	check_closed_broadcast!(nodes[0], true);
+	check_added_monitors!(nodes[0], 1);
+
+	let mut events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 6);
+	// The four HTLC-failure events may arrive in any order, so collect the expected hashes in a
+	// set and tick them off as we see them.
+	let mut failed_payments: HashSet<_> =
+		[timeout_payment_hash, dust_payment_hash, live_payment_hash, missing_htlc_payment_hash]
+		.iter().map(|a| *a).collect();
+	events.retain(|ev| {
+		match ev {
+			Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::NextHopChannel { node_id, channel_id }, .. } => {
+				assert_eq!(*channel_id, chan_id);
+				assert_eq!(*node_id, Some(nodes[1].node.get_our_node_id()));
+				false
+			},
+			Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::FailedPayment { payment_hash }, .. } => {
+				assert!(failed_payments.remove(payment_hash));
+				false
+			},
+			_ => true,
+		}
+	});
+	assert!(failed_payments.is_empty());
+	if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); }
+	match &events[1] {
+		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
+		_ => panic!(),
+	}
+
+	connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10);
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+
+	// Prior to channel closure, B considers the preimage HTLC as its own, and otherwise only
+	// lists the two on-chain timeout-able HTLCs as claimable balances.
+	assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
+			claimable_amount_satoshis: 100_000 - 5_000 - 4_000 - 3 - 2_000 + 3_000,
+		}, Balance::MaybeTimeoutClaimableHTLC {
+			claimable_amount_satoshis: 2_000,
+			claimable_height: missing_htlc_cltv_timeout,
+		}, Balance::MaybeTimeoutClaimableHTLC {
+			claimable_amount_satoshis: 4_000,
+			claimable_height: htlc_cltv_timeout,
+		}, Balance::MaybeTimeoutClaimableHTLC {
+			claimable_amount_satoshis: 5_000,
+			claimable_height: live_htlc_cltv_timeout,
+		}]),
+		sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+	mine_transaction(&nodes[1], &as_revoked_txn[0]);
+	// Only keep B's broadcasts that actually spend the revoked commitment transaction.
+	let mut claim_txn: Vec<_> = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..).filter(|tx| tx.input.iter().any(|inp| inp.previous_output.txid == as_revoked_txn[0].txid())).collect();
+	// Currently the revoked commitment is claimed in four transactions as the HTLCs all expire
+	// quite soon.
+	assert_eq!(claim_txn.len(), 4);
+	// Sort by total output value so the claim transactions have a deterministic order below.
+	claim_txn.sort_unstable_by_key(|tx| tx.output.iter().map(|output| output.value).sum::<u64>());
+
+	// The following constants were determined experimentally
+	const BS_TO_SELF_CLAIM_EXP_WEIGHT: usize = 483;
+	const OUTBOUND_HTLC_CLAIM_EXP_WEIGHT: usize = 571;
+	const INBOUND_HTLC_CLAIM_EXP_WEIGHT: usize = 578;
+
+	// Check that the weight is close to the expected weight. Note that signature sizes vary
+	// somewhat so it may not always be exact.
+	fuzzy_assert_eq(claim_txn[0].weight(), OUTBOUND_HTLC_CLAIM_EXP_WEIGHT);
+	fuzzy_assert_eq(claim_txn[1].weight(), INBOUND_HTLC_CLAIM_EXP_WEIGHT);
+	fuzzy_assert_eq(claim_txn[2].weight(), INBOUND_HTLC_CLAIM_EXP_WEIGHT);
+	fuzzy_assert_eq(claim_txn[3].weight(), BS_TO_SELF_CLAIM_EXP_WEIGHT);
+
+	// The expected balance for the next three checks, with the largest-HTLC and to_self output
+	// claim balances separated out.
+	let expected_balance = vec![Balance::ClaimableAwaitingConfirmations {
+			// to_remote output in A's revoked commitment
+			claimable_amount_satoshis: 100_000 - 5_000 - 4_000 - 3,
+			confirmation_height: nodes[1].best_block_info().1 + 5,
+		}, Balance::CounterpartyRevokedOutputClaimable {
+			claimable_amount_satoshis: 3_000,
+		}, Balance::CounterpartyRevokedOutputClaimable {
+			claimable_amount_satoshis: 4_000,
+		}];
+
+	let to_self_unclaimed_balance = Balance::CounterpartyRevokedOutputClaimable {
+		claimable_amount_satoshis: 1_000_000 - 100_000 - 3_000 - chan_feerate *
+			(channel::commitment_tx_base_weight(opt_anchors) + 3 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+	};
+	let to_self_claimed_avail_height;
+	let largest_htlc_unclaimed_balance = Balance::CounterpartyRevokedOutputClaimable {
+		claimable_amount_satoshis: 5_000,
+	};
+	let largest_htlc_claimed_avail_height;
+
+	// Once the channel has been closed by A, B now considers all of the commitment transactions'
+	// outputs as `CounterpartyRevokedOutputClaimable`.
+	assert_eq!(sorted_vec_with_additions(&expected_balance, &[&to_self_unclaimed_balance, &largest_htlc_unclaimed_balance]),
+		sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+	// Confirm B's two largest claims in the order under test; whichever confirms second (in the
+	// next block) matures one block later, hence the +5 vs +6 below.
+	if confirm_htlc_spend_first {
+		mine_transaction(&nodes[1], &claim_txn[2]);
+		largest_htlc_claimed_avail_height = nodes[1].best_block_info().1 + 5;
+		to_self_claimed_avail_height = nodes[1].best_block_info().1 + 6; // will be claimed in the next block
+	} else {
+		// Connect the to_self output claim, taking all of A's non-HTLC funds
+		mine_transaction(&nodes[1], &claim_txn[3]);
+		to_self_claimed_avail_height = nodes[1].best_block_info().1 + 5;
+		largest_htlc_claimed_avail_height = nodes[1].best_block_info().1 + 6; // will be claimed in the next block
+	}
+
+	let largest_htlc_claimed_balance = Balance::ClaimableAwaitingConfirmations {
+		claimable_amount_satoshis: 5_000 - chan_feerate * INBOUND_HTLC_CLAIM_EXP_WEIGHT as u64 / 1000,
+		confirmation_height: largest_htlc_claimed_avail_height,
+	};
+	let to_self_claimed_balance = Balance::ClaimableAwaitingConfirmations {
+		claimable_amount_satoshis: 1_000_000 - 100_000 - 3_000 - chan_feerate *
+			(channel::commitment_tx_base_weight(opt_anchors) + 3 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
+			- chan_feerate * claim_txn[3].weight() as u64 / 1000,
+		confirmation_height: to_self_claimed_avail_height,
+	};
+
+	if confirm_htlc_spend_first {
+		assert_eq!(sorted_vec_with_additions(&expected_balance, &[&to_self_unclaimed_balance, &largest_htlc_claimed_balance]),
+			sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+	} else {
+		assert_eq!(sorted_vec_with_additions(&expected_balance, &[&to_self_claimed_balance, &largest_htlc_unclaimed_balance]),
+			sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+	}
+
+	// Now mine whichever of the two claims wasn't mined above.
+	if confirm_htlc_spend_first {
+		mine_transaction(&nodes[1], &claim_txn[3]);
+	} else {
+		mine_transaction(&nodes[1], &claim_txn[2]);
+	}
+	assert_eq!(sorted_vec_with_additions(&expected_balance, &[&to_self_claimed_balance, &largest_htlc_claimed_balance]),
+		sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+	// Finally, connect the last two remaining HTLC spends and check that they move to
+	// `ClaimableAwaitingConfirmations`
+	mine_transaction(&nodes[1], &claim_txn[0]);
+	mine_transaction(&nodes[1], &claim_txn[1]);
+
+	assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+			// to_remote output in A's revoked commitment
+			claimable_amount_satoshis: 100_000 - 5_000 - 4_000 - 3,
+			confirmation_height: nodes[1].best_block_info().1 + 1,
+		}, Balance::ClaimableAwaitingConfirmations {
+			claimable_amount_satoshis: 1_000_000 - 100_000 - 3_000 - chan_feerate *
+				(channel::commitment_tx_base_weight(opt_anchors) + 3 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
+				- chan_feerate * claim_txn[3].weight() as u64 / 1000,
+			confirmation_height: to_self_claimed_avail_height,
+		}, Balance::ClaimableAwaitingConfirmations {
+			claimable_amount_satoshis: 3_000 - chan_feerate * OUTBOUND_HTLC_CLAIM_EXP_WEIGHT as u64 / 1000,
+			confirmation_height: nodes[1].best_block_info().1 + 4,
+		}, Balance::ClaimableAwaitingConfirmations {
+			claimable_amount_satoshis: 4_000 - chan_feerate * INBOUND_HTLC_CLAIM_EXP_WEIGHT as u64 / 1000,
+			confirmation_height: nodes[1].best_block_info().1 + 5,
+		}, Balance::ClaimableAwaitingConfirmations {
+			claimable_amount_satoshis: 5_000 - chan_feerate * INBOUND_HTLC_CLAIM_EXP_WEIGHT as u64 / 1000,
+			confirmation_height: largest_htlc_claimed_avail_height,
+		}]),
+		sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+	connect_blocks(&nodes[1], 1);
+	test_spendable_output(&nodes[1], &as_revoked_txn[0]);
+
+	let mut payment_failed_events = nodes[1].node.get_and_clear_pending_events();
+	expect_payment_failed_conditions_event(&nodes[1], payment_failed_events.pop().unwrap(),
+		dust_payment_hash, true, PaymentFailedConditions::new());
+	expect_payment_failed_conditions_event(&nodes[1], payment_failed_events.pop().unwrap(),
+		missing_htlc_payment_hash, true, PaymentFailedConditions::new());
+	assert!(payment_failed_events.is_empty());
+
+	// Mature each remaining claim one block at a time, checking each spendable output and the
+	// corresponding payment-failure events as they come due.
+	connect_blocks(&nodes[1], 1);
+	test_spendable_output(&nodes[1], &claim_txn[if confirm_htlc_spend_first { 2 } else { 3 }]);
+	connect_blocks(&nodes[1], 1);
+	test_spendable_output(&nodes[1], &claim_txn[if confirm_htlc_spend_first { 3 } else { 2 }]);
+	expect_payment_failed!(nodes[1], live_payment_hash, true);
+	connect_blocks(&nodes[1], 1);
+	test_spendable_output(&nodes[1], &claim_txn[0]);
+	connect_blocks(&nodes[1], 1);
+	test_spendable_output(&nodes[1], &claim_txn[1]);
+	expect_payment_failed!(nodes[1], timeout_payment_hash, true);
+	assert_eq!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new());
+}
+
+#[test]
+fn test_revoked_counterparty_commitment_balances() {
+       do_test_revoked_counterparty_commitment_balances(true);
+       do_test_revoked_counterparty_commitment_balances(false);
+}
+
+#[test]
+fn test_revoked_counterparty_htlc_tx_balances() {
+       // Tests `get_claimable_balances` for revocation spends of HTLC transactions.
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Create some initial channels
+       let (_, _, chan_id, funding_tx) =
+               create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 11_000_000, InitFeatures::known(), InitFeatures::known());
+       let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+       assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+
+       let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
+       let failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1;
+       let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_id);
+       assert_eq!(revoked_local_txn[0].input.len(), 1);
+       assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, funding_tx.txid());
+
+       // The to-be-revoked commitment tx should have two HTLCs and an output for both sides
+       assert_eq!(revoked_local_txn[0].output.len(), 4);
+
+       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+
+       let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
+       let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+
+       // B will generate an HTLC-Success from its revoked commitment tx
+       mine_transaction(&nodes[1], &revoked_local_txn[0]);
+       check_closed_broadcast!(nodes[1], true);
+       check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       let revoked_htlc_success_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+       assert_eq!(revoked_htlc_success_txn.len(), 2);
+       assert_eq!(revoked_htlc_success_txn[0].input.len(), 1);
+       assert_eq!(revoked_htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+       check_spends!(revoked_htlc_success_txn[0], revoked_local_txn[0]);
+       check_spends!(revoked_htlc_success_txn[1], funding_tx);
+
+       connect_blocks(&nodes[1], TEST_FINAL_CLTV);
+       let revoked_htlc_timeout_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(revoked_htlc_timeout_txn.len(), 1);
+       check_spends!(revoked_htlc_timeout_txn[0], revoked_local_txn[0]);
+       assert_ne!(revoked_htlc_success_txn[0].input[0].previous_output, revoked_htlc_timeout_txn[0].input[0].previous_output);
+       assert_eq!(revoked_htlc_success_txn[0].lock_time.0, 0);
+       assert_ne!(revoked_htlc_timeout_txn[0].lock_time.0, 0);
+
+       // A will generate justice tx from B's revoked commitment/HTLC tx
+       mine_transaction(&nodes[0], &revoked_local_txn[0]);
+       check_closed_broadcast!(nodes[0], true);
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       let to_remote_conf_height = nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1;
+
+       let as_commitment_claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(as_commitment_claim_txn.len(), 2);
+       check_spends!(as_commitment_claim_txn[0], revoked_local_txn[0]);
+       check_spends!(as_commitment_claim_txn[1], funding_tx);
+
+       // The next two checks have the same balance set for A - even though we confirm a revoked HTLC
+       // transaction our balance tracking doesn't use the on-chain value so the
+       // `CounterpartyRevokedOutputClaimable` entry doesn't change.
+       let as_balances = sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       // to_remote output in B's revoked commitment
+                       claimable_amount_satoshis: 1_000_000 - 11_000 - 3_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+                       confirmation_height: to_remote_conf_height,
+               }, Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in B's revoked commitment
+                       claimable_amount_satoshis: 10_000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+                       claimable_amount_satoshis: 3_000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+                       claimable_amount_satoshis: 1_000,
+               }]);
+       assert_eq!(as_balances,
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       mine_transaction(&nodes[0], &revoked_htlc_success_txn[0]);
+       let as_htlc_claim_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(as_htlc_claim_tx.len(), 2);
+       check_spends!(as_htlc_claim_tx[0], revoked_htlc_success_txn[0]);
+       check_spends!(as_htlc_claim_tx[1], revoked_local_txn[0]); // A has to generate a new claim for the remaining revoked
+                                                                 // outputs (which no longer includes the spent HTLC output)
+
+       assert_eq!(as_balances,
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       assert_eq!(as_htlc_claim_tx[0].output.len(), 1);
+       fuzzy_assert_eq(as_htlc_claim_tx[0].output[0].value,
+               3_000 - chan_feerate * (revoked_htlc_success_txn[0].weight() + as_htlc_claim_tx[0].weight()) as u64 / 1000);
+
+       mine_transaction(&nodes[0], &as_htlc_claim_tx[0]);
+       assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       // to_remote output in B's revoked commitment
+                       claimable_amount_satoshis: 1_000_000 - 11_000 - 3_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+                       confirmation_height: to_remote_conf_height,
+               }, Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in B's revoked commitment
+                       claimable_amount_satoshis: 10_000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+                       claimable_amount_satoshis: 1_000,
+               }, Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: as_htlc_claim_tx[0].output[0].value,
+                       confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1,
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 3);
+       test_spendable_output(&nodes[0], &revoked_local_txn[0]);
+       assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output to B
+                       claimable_amount_satoshis: 10_000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+                       claimable_amount_satoshis: 1_000,
+               }, Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: as_htlc_claim_tx[0].output[0].value,
+                       confirmation_height: nodes[0].best_block_info().1 + 2,
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[0], 2);
+       test_spendable_output(&nodes[0], &as_htlc_claim_tx[0]);
+       assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in B's revoked commitment
+                       claimable_amount_satoshis: 10_000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+                       claimable_amount_satoshis: 1_000,
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[0], revoked_htlc_timeout_txn[0].lock_time.0 - nodes[0].best_block_info().1);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0],
+               [HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
+       // As time goes on A may split its revocation claim transaction into multiple.
+       let as_fewer_input_rbf = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       for tx in as_fewer_input_rbf.iter() {
+               check_spends!(tx, revoked_local_txn[0]);
+       }
+
+       // Connect a number of additional blocks to ensure we don't forget the HTLC output needs
+       // claiming.
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+       let as_fewer_input_rbf = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       for tx in as_fewer_input_rbf.iter() {
+               check_spends!(tx, revoked_local_txn[0]);
+       }
+
+       mine_transaction(&nodes[0], &revoked_htlc_timeout_txn[0]);
+       let as_second_htlc_claim_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(as_second_htlc_claim_tx.len(), 2);
+
+       check_spends!(as_second_htlc_claim_tx[0], revoked_htlc_timeout_txn[0]);
+       check_spends!(as_second_htlc_claim_tx[1], revoked_local_txn[0]);
+
+       // Connect blocks to finalize the HTLC resolution with the HTLC-Timeout transaction. In a
+       // previous iteration of the revoked balance handling this would result in us "forgetting" that
+       // the revoked HTLC output still needed to be claimed.
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+       assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in B's revoked commitment
+                       claimable_amount_satoshis: 10_000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+                       claimable_amount_satoshis: 1_000,
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       mine_transaction(&nodes[0], &as_second_htlc_claim_tx[0]);
+       assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in B's revoked commitment
+                       claimable_amount_satoshis: 10_000,
+               }, Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: as_second_htlc_claim_tx[0].output[0].value,
+                       confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1,
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       mine_transaction(&nodes[0], &as_second_htlc_claim_tx[1]);
+       assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       // to_self output in B's revoked commitment
+                       claimable_amount_satoshis: as_second_htlc_claim_tx[1].output[0].value,
+                       confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1,
+               }, Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: as_second_htlc_claim_tx[0].output[0].value,
+                       confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 2,
+               }]),
+               sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
+       test_spendable_output(&nodes[0], &as_second_htlc_claim_tx[0]);
+       connect_blocks(&nodes[0], 1);
+       test_spendable_output(&nodes[0], &as_second_htlc_claim_tx[1]);
+
+       assert_eq!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new());
+}
+
+#[test]
+fn test_revoked_counterparty_aggregated_claims() {
+       // Tests `get_claimable_balances` for revoked counterparty commitment transactions when
+       // claiming with an aggregated claim transaction.
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       // We broadcast a second-to-latest commitment transaction, without providing the revocation
+       // secret to the counterparty. However, because we always immediately take the revocation
+       // secret from the keys_manager, we would panic at broadcast as we're trying to sign a
+       // transaction which, from the point of view of our keys_manager, is revoked.
+       chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let (_, _, chan_id, funding_tx) =
+               create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000, InitFeatures::known(), InitFeatures::known());
+       let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+       assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+
+       // We create two HTLCs, one which we will give A the preimage to to generate an HTLC-Success
+       // transaction, and one which we will not, allowing B to claim the HTLC output in an aggregated
+       // revocation-claim transaction.
+
+       let (claimed_payment_preimage, claimed_payment_hash, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+       let revoked_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 4_000_000).1;
+
+       let htlc_cltv_timeout = nodes[1].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+
+       // Cheat by giving A's ChannelMonitor the preimage to the to-be-claimed HTLC so that we have an
+       // HTLC-claim transaction on the to-be-revoked state.
+       get_monitor!(nodes[0], chan_id).provide_payment_preimage(&claimed_payment_hash, &claimed_payment_preimage,
+               &node_cfgs[0].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger);
+
+       // Now get the latest commitment transaction from A and then update the fee to revoke it
+       let as_revoked_txn = get_local_commitment_txn!(nodes[0], chan_id);
+
+       assert_eq!(as_revoked_txn.len(), 2);
+       check_spends!(as_revoked_txn[0], funding_tx);
+       check_spends!(as_revoked_txn[1], as_revoked_txn[0]); // The HTLC-Claim transaction
+
+       let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+       let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
+
+       {
+               let mut feerate = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate += 1;
+       }
+       nodes[0].node.timer_tick_occurred();
+       check_added_monitors!(nodes[0], 1);
+
+       let fee_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &fee_update.update_fee.unwrap());
+       commitment_signed_dance!(nodes[1], nodes[0], fee_update.commitment_signed, false);
+
+       nodes[0].node.claim_funds(claimed_payment_preimage);
+       expect_payment_claimed!(nodes[0], claimed_payment_hash, 3_000_000);
+       check_added_monitors!(nodes[0], 1);
+       let _a_htlc_msgs = get_htlc_update_msgs!(&nodes[0], nodes[1].node.get_our_node_id());
+
+       assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
+                       claimable_amount_satoshis: 100_000 - 4_000 - 3_000,
+               }, Balance::MaybeTimeoutClaimableHTLC {
+                       claimable_amount_satoshis: 4_000,
+                       claimable_height: htlc_cltv_timeout,
+               }, Balance::MaybeTimeoutClaimableHTLC {
+                       claimable_amount_satoshis: 3_000,
+                       claimable_height: htlc_cltv_timeout,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       mine_transaction(&nodes[1], &as_revoked_txn[0]);
+       check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_added_monitors!(nodes[1], 1);
+
+       let mut claim_txn: Vec<_> = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..).filter(|tx| tx.input.iter().any(|inp| inp.previous_output.txid == as_revoked_txn[0].txid())).collect();
+       // Currently the revoked commitment outputs are all claimed in one aggregated transaction
+       assert_eq!(claim_txn.len(), 1);
+       assert_eq!(claim_txn[0].input.len(), 3);
+       check_spends!(claim_txn[0], as_revoked_txn[0]);
+
+       let to_remote_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+
+       assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       // to_remote output in A's revoked commitment
+                       claimable_amount_satoshis: 100_000 - 4_000 - 3_000,
+                       confirmation_height: to_remote_maturity,
+               }, Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in A's revoked commitment
+                       claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+                       claimable_amount_satoshis: 4_000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+                       claimable_amount_satoshis: 3_000,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       // Confirm A's HTLC-Success transaction which presumably raced B's claim, causing B to create a
+       // new claim.
+       mine_transaction(&nodes[1], &as_revoked_txn[1]);
+       expect_payment_sent!(nodes[1], claimed_payment_preimage);
+       let mut claim_txn_2: Vec<_> = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+       claim_txn_2.sort_unstable_by_key(|tx| if tx.input.iter().any(|inp| inp.previous_output.txid == as_revoked_txn[0].txid()) { 0 } else { 1 });
+       // Once B sees the HTLC-Success transaction it splits its claim transaction into two, though in
+       // theory it could re-aggregate the claims as well.
+       assert_eq!(claim_txn_2.len(), 2);
+       assert_eq!(claim_txn_2[0].input.len(), 2);
+       check_spends!(claim_txn_2[0], as_revoked_txn[0]);
+       assert_eq!(claim_txn_2[1].input.len(), 1);
+       check_spends!(claim_txn_2[1], as_revoked_txn[1]);
+
+       assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+                       // to_remote output in A's revoked commitment
+                       claimable_amount_satoshis: 100_000 - 4_000 - 3_000,
+                       confirmation_height: to_remote_maturity,
+               }, Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in A's revoked commitment
+                       claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+                       claimable_amount_satoshis: 4_000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+                       // The amount here is a bit of a misnomer, really it's been reduced by the HTLC
+                       // transaction fee, but the claimable amount is always a bit of an overshoot for HTLCs
+                       // anyway, so it's not a big change.
+                       claimable_amount_satoshis: 3_000,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[1], 5);
+       test_spendable_output(&nodes[1], &as_revoked_txn[0]);
+
+       assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in A's revoked commitment
+                       claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+                       claimable_amount_satoshis: 4_000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+                       // The amount here is a bit of a misnomer, really it's been reduced by the HTLC
+                       // transaction fee, but the claimable amount is always a bit of an overshoot for HTLCs
+                       // anyway, so it's not a big change.
+                       claimable_amount_satoshis: 3_000,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       mine_transaction(&nodes[1], &claim_txn_2[1]);
+       let htlc_2_claim_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+
+       assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in A's revoked commitment
+                       claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+                       claimable_amount_satoshis: 4_000,
+               }, Balance::ClaimableAwaitingConfirmations { // HTLC 2
+                       claimable_amount_satoshis: claim_txn_2[1].output[0].value,
+                       confirmation_height: htlc_2_claim_maturity,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       connect_blocks(&nodes[1], 5);
+       test_spendable_output(&nodes[1], &claim_txn_2[1]);
+
+       assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+                       // to_self output in A's revoked commitment
+                       claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+                               (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+               }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+                       claimable_amount_satoshis: 4_000,
+               }]),
+               sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+       mine_transaction(&nodes[1], &claim_txn_2[0]);
+       let rest_claim_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+
+       assert_eq!(vec![Balance::ClaimableAwaitingConfirmations {
+                       claimable_amount_satoshis: claim_txn_2[0].output[0].value,
+                       confirmation_height: rest_claim_maturity,
+               }],
+               nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
+
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // We shouldn't fail the payment until we spend the output
+
+       connect_blocks(&nodes[1], 5);
+       expect_payment_failed!(nodes[1], revoked_payment_hash, true);
+       test_spendable_output(&nodes[1], &claim_txn_2[0]);
+       assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
+}
index c2925d7354de4142c109ee79bf7b8c33fb0819ba..a32b17b9f0d331c50780bec4963ff4e17c3d4306 100644 (file)
@@ -31,6 +31,8 @@ use bitcoin::blockdata::script::Script;
 use bitcoin::hash_types::{Txid, BlockHash};
 
 use ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
+use ln::onion_utils;
+use onion_message;
 
 use prelude::*;
 use core::fmt;
@@ -38,9 +40,9 @@ use core::fmt::Debug;
 use io::{self, Read};
 use io_extras::read_to_end;
 
-use util::events::MessageSendEventsProvider;
+use util::events::{MessageSendEventsProvider, OnionMessageProvider};
 use util::logger;
-use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt, Hostname};
+use util::ser::{BigSize, LengthReadable, Readable, ReadableArgs, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname};
 
 use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 
@@ -304,6 +306,14 @@ pub struct UpdateAddHTLC {
        pub(crate) onion_routing_packet: OnionPacket,
 }
 
+/// An onion message to be sent or received from a peer
+#[derive(Clone, Debug, PartialEq)]
+pub struct OnionMessage {
+       /// Used in decrypting the onion packet's payload.
+       pub blinding_point: PublicKey,
+       pub(crate) onion_routing_packet: onion_message::Packet,
+}
+
 /// An update_fulfill_htlc message to be sent or received from a peer
 #[derive(Clone, Debug, PartialEq)]
 pub struct UpdateFulfillHTLC {
@@ -905,15 +915,15 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider {
        /// Handle an incoming channel_update message, returning true if it should be forwarded on,
        /// false or returning an Err otherwise.
        fn handle_channel_update(&self, msg: &ChannelUpdate) -> Result<bool, LightningError>;
-       /// Gets a subset of the channel announcements and updates required to dump our routing table
-       /// to a remote node, starting at the short_channel_id indicated by starting_point and
-       /// including the batch_amount entries immediately higher in numerical value than starting_point.
-       fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)>;
-       /// Gets a subset of the node announcements required to dump our routing table to a remote node,
-       /// starting at the node *after* the provided publickey and including batch_amount entries
-       /// immediately higher (as defined by <PublicKey as Ord>::cmp) than starting_point.
+       /// Gets channel announcements and updates required to dump our routing table to a remote node,
+       /// starting at the short_channel_id indicated by starting_point and including announcements
+       /// for a single channel.
+       fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)>;
+       /// Gets a node announcement required to dump our routing table to a remote node, starting at
+       /// the node *after* the provided pubkey and including up to one announcement immediately
+       /// higher (as defined by <PublicKey as Ord>::cmp) than starting_point.
        /// If None is provided for starting_point, we start at the first node.
-       fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement>;
+       fn get_next_node_announcement(&self, starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement>;
        /// Called when a connection is established with a peer. This can be used to
        /// perform routing table synchronization using a strategy defined by the
        /// implementor.
@@ -935,6 +945,18 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider {
        fn handle_query_short_channel_ids(&self, their_node_id: &PublicKey, msg: QueryShortChannelIds) -> Result<(), LightningError>;
 }
 
+/// A trait to describe an object that can receive onion messages.
+pub trait OnionMessageHandler : OnionMessageProvider {
+       /// Handle an incoming onion_message message from the given peer.
+       fn handle_onion_message(&self, peer_node_id: &PublicKey, msg: &OnionMessage);
+       /// Called when a connection is established with a peer. Can be used to track which peers
+       /// advertise onion message support and are online.
+       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init);
+       /// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to
+       /// drop and refuse to forward onion messages to this peer.
+       fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+}
+
 mod fuzzy_internal_msgs {
        use prelude::*;
        use ln::{PaymentPreimage, PaymentSecret};
@@ -993,6 +1015,18 @@ pub(crate) struct OnionPacket {
        pub(crate) hmac: [u8; 32],
 }
 
+impl onion_utils::Packet for OnionPacket {
+       type Data = onion_utils::FixedSizeOnionPacket;
+       fn new(pubkey: PublicKey, hop_data: onion_utils::FixedSizeOnionPacket, hmac: [u8; 32]) -> Self {
+               Self {
+                       version: 0,
+                       public_key: Ok(pubkey),
+                       hop_data: hop_data.0,
+                       hmac,
+               }
+       }
+}
+
 impl PartialEq for OnionPacket {
        fn eq(&self, other: &OnionPacket) -> bool {
                for (i, j) in self.hop_data.iter().zip(other.hop_data.iter()) {
@@ -1327,17 +1361,40 @@ impl_writeable_msg!(UpdateAddHTLC, {
        onion_routing_packet
 }, {});
 
+impl Readable for OnionMessage {
+       fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let blinding_point: PublicKey = Readable::read(r)?;
+               let len: u16 = Readable::read(r)?;
+               let mut packet_reader = FixedLengthReader::new(r, len as u64);
+               let onion_routing_packet: onion_message::Packet = <onion_message::Packet as LengthReadable>::read(&mut packet_reader)?;
+               Ok(Self {
+                       blinding_point,
+                       onion_routing_packet,
+               })
+       }
+}
+
+impl Writeable for OnionMessage {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               self.blinding_point.write(w)?;
+               let onion_packet_len = self.onion_routing_packet.serialized_length();
+               (onion_packet_len as u16).write(w)?;
+               self.onion_routing_packet.write(w)?;
+               Ok(())
+       }
+}
+
 impl Writeable for FinalOnionHopData {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.payment_secret.0.write(w)?;
-               HighZeroBytesDroppedVarInt(self.total_msat).write(w)
+               HighZeroBytesDroppedBigSize(self.total_msat).write(w)
        }
 }
 
 impl Readable for FinalOnionHopData {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let secret: [u8; 32] = Readable::read(r)?;
-               let amt: HighZeroBytesDroppedVarInt<u64> = Readable::read(r)?;
+               let amt: HighZeroBytesDroppedBigSize<u64> = Readable::read(r)?;
                Ok(Self { payment_secret: PaymentSecret(secret), total_msat: amt.0 })
        }
 }
@@ -1354,15 +1411,15 @@ impl Writeable for OnionHopData {
                        },
                        OnionHopDataFormat::NonFinalNode { short_channel_id } => {
                                encode_varint_length_prefixed_tlv!(w, {
-                                       (2, HighZeroBytesDroppedVarInt(self.amt_to_forward), required),
-                                       (4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value), required),
+                                       (2, HighZeroBytesDroppedBigSize(self.amt_to_forward), required),
+                                       (4, HighZeroBytesDroppedBigSize(self.outgoing_cltv_value), required),
                                        (6, short_channel_id, required)
                                });
                        },
                        OnionHopDataFormat::FinalNode { ref payment_data, ref keysend_preimage } => {
                                encode_varint_length_prefixed_tlv!(w, {
-                                       (2, HighZeroBytesDroppedVarInt(self.amt_to_forward), required),
-                                       (4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value), required),
+                                       (2, HighZeroBytesDroppedBigSize(self.amt_to_forward), required),
+                                       (4, HighZeroBytesDroppedBigSize(self.outgoing_cltv_value), required),
                                        (8, payment_data, option),
                                        (5482373484, keysend_preimage, option)
                                });
@@ -1373,27 +1430,22 @@ impl Writeable for OnionHopData {
 }
 
 impl Readable for OnionHopData {
-       fn read<R: Read>(mut r: &mut R) -> Result<Self, DecodeError> {
-               use bitcoin::consensus::encode::{Decodable, Error, VarInt};
-               let v: VarInt = Decodable::consensus_decode(&mut r)
-                       .map_err(|e| match e {
-                               Error::Io(ioe) => DecodeError::from(ioe),
-                               _ => DecodeError::InvalidValue
-                       })?;
+       fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let b: BigSize = Readable::read(r)?;
                const LEGACY_ONION_HOP_FLAG: u64 = 0;
-               let (format, amt, cltv_value) = if v.0 != LEGACY_ONION_HOP_FLAG {
-                       let mut rd = FixedLengthReader::new(r, v.0);
-                       let mut amt = HighZeroBytesDroppedVarInt(0u64);
-                       let mut cltv_value = HighZeroBytesDroppedVarInt(0u32);
+               let (format, amt, cltv_value) = if b.0 != LEGACY_ONION_HOP_FLAG {
+                       let mut rd = FixedLengthReader::new(r, b.0);
+                       let mut amt = HighZeroBytesDroppedBigSize(0u64);
+                       let mut cltv_value = HighZeroBytesDroppedBigSize(0u32);
                        let mut short_id: Option<u64> = None;
                        let mut payment_data: Option<FinalOnionHopData> = None;
                        let mut keysend_preimage: Option<PaymentPreimage> = None;
-                       // The TLV type is chosen to be compatible with lnd and c-lightning.
                        decode_tlv_stream!(&mut rd, {
                                (2, amt, required),
                                (4, cltv_value, required),
                                (6, short_id, option),
                                (8, payment_data, option),
+                               // See https://github.com/lightning/blips/blob/master/blip-0003.md
                                (5482373484, keysend_preimage, option)
                        });
                        rd.eat_remaining().map_err(|_| DecodeError::ShortRead)?;
@@ -1435,6 +1487,14 @@ impl Readable for OnionHopData {
        }
 }
 
+// ReadableArgs because we need onion_utils::decode_next_hop to accommodate payment packets and
+// onion message packets.
+impl ReadableArgs<()> for OnionHopData {
+       fn read<R: Read>(r: &mut R, _arg: ()) -> Result<Self, DecodeError> {
+               <Self as Readable>::read(r)
+       }
+}
+
 impl Writeable for Ping {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.ponglen.write(w)?;
@@ -1860,7 +1920,7 @@ mod tests {
        use bitcoin::secp256k1::{PublicKey,SecretKey};
        use bitcoin::secp256k1::{Secp256k1, Message};
 
-       use io::Cursor;
+       use io::{self, Cursor};
        use prelude::*;
        use core::convert::TryFrom;
 
@@ -2771,4 +2831,40 @@ mod tests {
                assert_eq!(gossip_timestamp_filter.first_timestamp, 1590000000);
                assert_eq!(gossip_timestamp_filter.timestamp_range, 0xffff_ffff);
        }
+
+       #[test]
+       fn decode_onion_hop_data_len_as_bigsize() {
+               // Tests that we can decode an onion payload that is >253 bytes.
+               // Previously, receiving a payload of this size could've caused us to fail to decode a valid
+               // payload, because we were decoding the length (a BigSize, big-endian) as a VarInt
+               // (little-endian).
+
+               // Encode a test onion payload with a big custom TLV such that it's >253 bytes, forcing the
+               // payload length to be encoded over multiple bytes rather than a single u8.
+               let big_payload = encode_big_payload().unwrap();
+               let mut rd = Cursor::new(&big_payload[..]);
+               <msgs::OnionHopData as Readable>::read(&mut rd).unwrap();
+       }
+       // see above test, needs to be a separate method for use of the serialization macros.
+       fn encode_big_payload() -> Result<Vec<u8>, io::Error> {
+               use util::ser::HighZeroBytesDroppedBigSize;
+               let payload = msgs::OnionHopData {
+                       format: OnionHopDataFormat::NonFinalNode {
+                               short_channel_id: 0xdeadbeef1bad1dea,
+                       },
+                       amt_to_forward: 1000,
+                       outgoing_cltv_value: 0xffffffff,
+               };
+               let mut encoded_payload = Vec::new();
+               let test_bytes = vec![42u8; 1000];
+               if let OnionHopDataFormat::NonFinalNode { short_channel_id } = payload.format {
+                       encode_varint_length_prefixed_tlv!(&mut encoded_payload, {
+                               (1, test_bytes, vec_type),
+                               (2, HighZeroBytesDroppedBigSize(payload.amt_to_forward), required),
+                               (4, HighZeroBytesDroppedBigSize(payload.outgoing_cltv_value), required),
+                               (6, short_channel_id, required)
+                       });
+               }
+               Ok(encoded_payload)
+       }
 }
index b223a344dbe204aafcd511c5efc2d4e035cf1ba9..3795ad5ee77d70a03e5124fc3800823f610e0634 100644 (file)
@@ -15,7 +15,7 @@ use routing::gossip::NetworkUpdate;
 use routing::router::RouteHop;
 use util::chacha20::{ChaCha20, ChaChaReader};
 use util::errors::{self, APIError};
-use util::ser::{Readable, Writeable, LengthCalculatingWriter};
+use util::ser::{Readable, ReadableArgs, Writeable, LengthCalculatingWriter};
 use util::logger::Logger;
 
 use bitcoin::hashes::{Hash, HashEngine};
@@ -23,28 +23,36 @@ use bitcoin::hashes::cmp::fixed_time_eq;
 use bitcoin::hashes::hmac::{Hmac, HmacEngine};
 use bitcoin::hashes::sha256::Hash as Sha256;
 
-use bitcoin::secp256k1::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{SecretKey, PublicKey, Scalar};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
 use prelude::*;
 use io::{Cursor, Read};
-use core::convert::TryInto;
+use core::convert::{AsMut, TryInto};
 use core::ops::Deref;
 
-pub(super) struct OnionKeys {
+pub(crate) struct OnionKeys {
        #[cfg(test)]
-       pub(super) shared_secret: SharedSecret,
+       pub(crate) shared_secret: SharedSecret,
        #[cfg(test)]
-       pub(super) blinding_factor: [u8; 32],
-       pub(super) ephemeral_pubkey: PublicKey,
-       pub(super) rho: [u8; 32],
-       pub(super) mu: [u8; 32],
+       pub(crate) blinding_factor: [u8; 32],
+       pub(crate) ephemeral_pubkey: PublicKey,
+       pub(crate) rho: [u8; 32],
+       pub(crate) mu: [u8; 32],
 }
 
 #[inline]
-pub(super) fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
+pub(crate) fn gen_rho_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
+       assert_eq!(shared_secret.len(), 32);
+       let mut hmac = HmacEngine::<Sha256>::new(&[0x72, 0x68, 0x6f]); // rho
+       hmac.input(&shared_secret);
+       Hmac::from_engine(hmac).into_inner()
+}
+
+#[inline]
+pub(crate) fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
        assert_eq!(shared_secret.len(), 32);
        ({
                let mut hmac = HmacEngine::<Sha256>::new(&[0x72, 0x68, 0x6f]); // rho
@@ -74,7 +82,7 @@ pub(super) fn gen_ammag_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
        Hmac::from_engine(hmac).into_inner()
 }
 
-pub(super) fn next_hop_packet_pubkey<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, mut packet_pubkey: PublicKey, packet_shared_secret: &[u8; 32]) -> Result<PublicKey, secp256k1::Error> {
+pub(crate) fn next_hop_packet_pubkey<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, packet_pubkey: PublicKey, packet_shared_secret: &[u8; 32]) -> Result<PublicKey, secp256k1::Error> {
        let blinding_factor = {
                let mut sha = Sha256::engine();
                sha.input(&packet_pubkey.serialize()[..]);
@@ -82,7 +90,7 @@ pub(super) fn next_hop_packet_pubkey<T: secp256k1::Signing + secp256k1::Verifica
                Sha256::from_engine(sha).into_inner()
        };
 
-       packet_pubkey.mul_assign(secp_ctx, &blinding_factor[..]).map(|_| packet_pubkey)
+       packet_pubkey.mul_tweak(secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap())
 }
 
 // can only fail if an intermediary hop has an invalid public key or session_priv is invalid
@@ -101,7 +109,7 @@ pub(super) fn construct_onion_keys_callback<T: secp256k1::Signing, FType: FnMut(
 
                let ephemeral_pubkey = blinded_pub;
 
-               blinded_priv.mul_assign(&blinding_factor)?;
+               blinded_priv = blinded_priv.mul_tweak(&Scalar::from_be_bytes(blinding_factor).unwrap())?;
                blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
 
                callback(shared_secret, blinding_factor, ephemeral_pubkey, hop, idx);
@@ -187,8 +195,8 @@ pub(super) fn build_onion_payloads(path: &Vec<RouteHop>, total_msat: u64, paymen
 pub(crate) const ONION_DATA_LEN: usize = 20*65;
 
 #[inline]
-fn shift_arr_right(arr: &mut [u8; ONION_DATA_LEN], amt: usize) {
-       for i in (amt..ONION_DATA_LEN).rev() {
+fn shift_slice_right(arr: &mut [u8], amt: usize) {
+       for i in (amt..arr.len()).rev() {
                arr[i] = arr[i-amt];
        }
        for i in 0..amt {
@@ -210,14 +218,15 @@ pub(super) fn route_size_insane(payloads: &Vec<msgs::OnionHopData>) -> bool {
        false
 }
 
-/// panics if route_size_insane(paylods)
+/// panics if route_size_insane(payloads)
 pub(super) fn construct_onion_packet(payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> msgs::OnionPacket {
        let mut packet_data = [0; ONION_DATA_LEN];
 
        let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
        chacha.process(&[0; ONION_DATA_LEN], &mut packet_data);
 
-       construct_onion_packet_with_init_noise(payloads, onion_keys, packet_data, associated_data)
+       construct_onion_packet_with_init_noise::<_, _>(
+               payloads, onion_keys, FixedSizeOnionPacket(packet_data), Some(associated_data))
 }
 
 #[cfg(test)]
@@ -229,12 +238,50 @@ pub(super) fn construct_onion_packet_bogus_hopdata<HD: Writeable>(payloads: Vec<
        let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
        chacha.process(&[0; ONION_DATA_LEN], &mut packet_data);
 
-       construct_onion_packet_with_init_noise(payloads, onion_keys, packet_data, associated_data)
+       construct_onion_packet_with_init_noise::<_, _>(
+               payloads, onion_keys, FixedSizeOnionPacket(packet_data), Some(associated_data))
+}
+
+/// Since onion message packets and onion payment packets have different lengths but are otherwise
+/// identical, we use this trait to allow `construct_onion_packet_with_init_noise` to return either
+/// type.
+pub(crate) trait Packet {
+       type Data: AsMut<[u8]>;
+       fn new(pubkey: PublicKey, hop_data: Self::Data, hmac: [u8; 32]) -> Self;
+}
+
+// Needed for rustc versions older than 1.47 to avoid E0277: "arrays only have std trait
+// implementations for lengths 0..=32".
+pub(crate) struct FixedSizeOnionPacket(pub(crate) [u8; ONION_DATA_LEN]);
+
+impl AsMut<[u8]> for FixedSizeOnionPacket {
+       fn as_mut(&mut self) -> &mut [u8] {
+               &mut self.0
+       }
+}
+
+pub(crate) fn payloads_serialized_length<HD: Writeable>(payloads: &Vec<HD>) -> usize {
+       payloads.iter().map(|p| p.serialized_length() + 32 /* HMAC */).sum()
 }
 
-/// panics if route_size_insane(paylods)
-fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, mut packet_data: [u8; ONION_DATA_LEN], associated_data: &PaymentHash) -> msgs::OnionPacket {
+/// panics if payloads_serialized_length(payloads) > packet_data_len
+pub(crate) fn construct_onion_message_packet<HD: Writeable, P: Packet<Data = Vec<u8>>>(
+       payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], packet_data_len: usize) -> P
+{
+       let mut packet_data = vec![0; packet_data_len];
+
+       let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
+       chacha.process_in_place(&mut packet_data);
+
+       construct_onion_packet_with_init_noise::<_, _>(payloads, onion_keys, packet_data, None)
+}
+
+/// panics if payloads_serialized_length(payloads) > packet_data.len()
+fn construct_onion_packet_with_init_noise<HD: Writeable, P: Packet>(
+       mut payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, mut packet_data: P::Data, associated_data: Option<&PaymentHash>) -> P
+{
        let filler = {
+               let packet_data = packet_data.as_mut();
                const ONION_HOP_DATA_LEN: usize = 65; // We may decrease this eventually after TLV is common
                let mut res = Vec::with_capacity(ONION_HOP_DATA_LEN * (payloads.len() - 1));
 
@@ -243,7 +290,7 @@ fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>,
                        if i == payloads.len() - 1 { break; }
 
                        let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
-                       for _ in 0..(ONION_DATA_LEN - pos) { // TODO: Batch this.
+                       for _ in 0..(packet_data.len() - pos) { // TODO: Batch this.
                                let mut dummy = [0; 1];
                                chacha.process_in_place(&mut dummy); // We don't have a seek function :(
                        }
@@ -251,7 +298,7 @@ fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>,
                        let mut payload_len = LengthCalculatingWriter(0);
                        payload.write(&mut payload_len).expect("Failed to calculate length");
                        pos += payload_len.0 + 32;
-                       assert!(pos <= ONION_DATA_LEN);
+                       assert!(pos <= packet_data.len());
 
                        res.resize(pos, 0u8);
                        chacha.process_in_place(&mut res);
@@ -263,29 +310,28 @@ fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>,
        for (i, (payload, keys)) in payloads.iter_mut().zip(onion_keys.iter()).rev().enumerate() {
                let mut payload_len = LengthCalculatingWriter(0);
                payload.write(&mut payload_len).expect("Failed to calculate length");
-               shift_arr_right(&mut packet_data, payload_len.0 + 32);
+
+               let packet_data = packet_data.as_mut();
+               shift_slice_right(packet_data, payload_len.0 + 32);
                packet_data[0..payload_len.0].copy_from_slice(&payload.encode()[..]);
                packet_data[payload_len.0..(payload_len.0 + 32)].copy_from_slice(&hmac_res);
 
                let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
-               chacha.process_in_place(&mut packet_data);
+               chacha.process_in_place(packet_data);
 
                if i == 0 {
                        packet_data[ONION_DATA_LEN - filler.len()..ONION_DATA_LEN].copy_from_slice(&filler[..]);
                }
 
                let mut hmac = HmacEngine::<Sha256>::new(&keys.mu);
-               hmac.input(&packet_data);
-               hmac.input(&associated_data.0[..]);
+               hmac.input(packet_data);
+               if let Some(associated_data) = associated_data {
+                       hmac.input(&associated_data.0[..]);
+               }
                hmac_res = Hmac::from_engine(hmac).into_inner();
        }
 
-       msgs::OnionPacket {
-               version: 0,
-               public_key: Ok(onion_keys.first().unwrap().ephemeral_pubkey),
-               hop_data: packet_data,
-               hmac: hmac_res,
-       }
+       P::new(onion_keys.first().unwrap().ephemeral_pubkey, packet_data, hmac_res)
 }
 
 /// Encrypts a failure packet. raw_packet can either be a
@@ -534,7 +580,50 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(secp_ctx: &
        } else { unreachable!(); }
 }
 
-/// Data decrypted from the onion payload.
+/// An input used when decoding an onion packet.
+pub(crate) trait DecodeInput {
+       type Arg;
+       /// If Some, this is the input when checking the hmac of the onion packet.
+       fn payment_hash(&self) -> Option<&PaymentHash>;
+       /// Read argument when decrypting our hop payload.
+       fn read_arg(self) -> Self::Arg;
+}
+
+impl DecodeInput for PaymentHash {
+       type Arg = ();
+       fn payment_hash(&self) -> Option<&PaymentHash> {
+               Some(self)
+       }
+       fn read_arg(self) -> Self::Arg { () }
+}
+
+impl DecodeInput for SharedSecret {
+       type Arg = SharedSecret;
+       fn payment_hash(&self) -> Option<&PaymentHash> {
+               None
+       }
+       fn read_arg(self) -> Self::Arg { self }
+}
+
+/// Allows `decode_next_hop` to return the next hop packet bytes for either payments or onion
+/// message forwards.
+pub(crate) trait NextPacketBytes: AsMut<[u8]> {
+       fn new(len: usize) -> Self;
+}
+
+impl NextPacketBytes for FixedSizeOnionPacket {
+       fn new(_len: usize) -> Self  {
+               Self([0 as u8; ONION_DATA_LEN])
+       }
+}
+
+impl NextPacketBytes for Vec<u8> {
+       fn new(len: usize) -> Self {
+               vec![0 as u8; len]
+       }
+}
+
+/// Data decrypted from a payment's onion payload.
 pub(crate) enum Hop {
        /// This onion payload was for us, not for forwarding to a next-hop. Contains information for
        /// verifying the incoming payment.
@@ -546,11 +635,12 @@ pub(crate) enum Hop {
                /// HMAC of the next hop's onion packet.
                next_hop_hmac: [u8; 32],
                /// Bytes of the onion packet we're forwarding.
-               new_packet_bytes: [u8; 20*65],
+               new_packet_bytes: [u8; ONION_DATA_LEN],
        },
 }
 
 /// Error returned when we fail to decode the onion packet.
+#[derive(Debug)]
 pub(crate) enum OnionDecodeErr {
        /// The HMAC of the onion packet did not match the hop data.
        Malformed {
@@ -564,11 +654,27 @@ pub(crate) enum OnionDecodeErr {
        },
 }
 
-pub(crate) fn decode_next_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash) -> Result<Hop, OnionDecodeErr> {
+pub(crate) fn decode_next_payment_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash) -> Result<Hop, OnionDecodeErr> {
+       match decode_next_hop(shared_secret, hop_data, hmac_bytes, payment_hash) {
+               Ok((next_hop_data, None)) => Ok(Hop::Receive(next_hop_data)),
+               Ok((next_hop_data, Some((next_hop_hmac, FixedSizeOnionPacket(new_packet_bytes))))) => {
+                       Ok(Hop::Forward {
+                               next_hop_data,
+                               next_hop_hmac,
+                               new_packet_bytes
+                       })
+               },
+               Err(e) => Err(e),
+       }
+}
+
+pub(crate) fn decode_next_hop<D: DecodeInput, R: ReadableArgs<D::Arg>, N: NextPacketBytes>(shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], decode_input: D) -> Result<(R, Option<([u8; 32], N)>), OnionDecodeErr> {
        let (rho, mu) = gen_rho_mu_from_shared_secret(&shared_secret);
        let mut hmac = HmacEngine::<Sha256>::new(&mu);
        hmac.input(hop_data);
-       hmac.input(&payment_hash.0[..]);
+       if let Some(payment_hash) = decode_input.payment_hash() {
+               hmac.input(&payment_hash.0[..]);
+       }
        if !fixed_time_eq(&Hmac::from_engine(hmac).into_inner(), &hmac_bytes) {
                return Err(OnionDecodeErr::Malformed {
                        err_msg: "HMAC Check failed",
@@ -578,7 +684,7 @@ pub(crate) fn decode_next_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_byt
 
        let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
        let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&hop_data[..]) };
-       match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) {
+       match R::read(&mut chacha_stream, decode_input.read_arg()) {
                Err(err) => {
                        let error_code = match err {
                                msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
@@ -616,10 +722,11 @@ pub(crate) fn decode_next_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_byt
                                        chacha_stream.read_exact(&mut next_bytes).unwrap();
                                        assert_ne!(next_bytes[..], [0; 32][..]);
                                }
-                               return Ok(Hop::Receive(msg));
+                               return Ok((msg, None)); // We are the final destination for this packet
                        } else {
-                               let mut new_packet_bytes = [0; 20*65];
-                               let read_pos = chacha_stream.read(&mut new_packet_bytes).unwrap();
+                               let mut new_packet_bytes = N::new(hop_data.len());
+                               let read_pos = hop_data.len() - chacha_stream.read.position() as usize;
+                               chacha_stream.read_exact(&mut new_packet_bytes.as_mut()[..read_pos]).unwrap();
                                #[cfg(debug_assertions)]
                                {
                                        // Check two things:
@@ -631,12 +738,8 @@ pub(crate) fn decode_next_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_byt
                                }
                                // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
                                // fill the onion hop data we'll forward to our next-hop peer.
-                               chacha_stream.chacha.process_in_place(&mut new_packet_bytes[read_pos..]);
-                               return Ok(Hop::Forward {
-                                       next_hop_data: msg,
-                                       next_hop_hmac: hmac,
-                                       new_packet_bytes,
-                               })
+                               chacha_stream.chacha.process_in_place(&mut new_packet_bytes.as_mut()[read_pos..]);
+                               return Ok((msg, Some((hmac, new_packet_bytes)))) // This packet needs forwarding
                        }
                },
        }
@@ -775,7 +878,7 @@ mod tests {
                        },
                );
 
-               let packet = super::construct_onion_packet_with_init_noise(payloads, onion_keys, [0; super::ONION_DATA_LEN], &PaymentHash([0x42; 32]));
+               let packet: msgs::OnionPacket = super::construct_onion_packet_with_init_noise::<_, _>(payloads, onion_keys, super::FixedSizeOnionPacket([0; super::ONION_DATA_LEN]), Some(&PaymentHash([0x42; 32])));
                // Just check the final packet encoding, as it includes all the per-hop vectors in it
                // anyway...
                assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acb
fde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap());
@@ -854,7 +957,7 @@ mod tests {
                        }),
                );
 
-               let packet = super::construct_onion_packet_with_init_noise(payloads, onion_keys, [0; super::ONION_DATA_LEN], &PaymentHash([0x42; 32]));
+               let packet: msgs::OnionPacket = super::construct_onion_packet_with_init_noise::<_, _>(payloads, onion_keys, super::FixedSizeOnionPacket([0; super::ONION_DATA_LEN]), Some(&PaymentHash([0x42; 32])));
                // Just check the final packet encoding, as it includes all the per-hop vectors in it
                // anyway...
                assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a71a060daf367132b378b3a3883c0e2c0e026b8900b2b5cdbc784e1a3bb913f88a9c50f7d61ab590531cf08000178a333a347f8b4072ed056f820f77774345e183a342ec4729f3d84accf515e88adddb85ecc08daba68404bae9a8e8d7178977d7094a1ae549f89338c0777551f874159eb42d3a59fb9285ad4e24883f27de23942ec966611e99bee1cee503455be9e8e642cef6cef7b9864130f692283f8a973d47a8f1c1726b6e59969385975c766e35737c8d76388b64f748ee7943ffb0e2ee45c57a1abc40762ae598723d21bd184e2b338f68ebff47219357bd19cd7e01e2337b806ef4d717888e129e59cd3dc31e6201ccb2fd6d7499836f37a993262468bcb3a4dcd03a22818aca49c6b7b9b8e9e870045631d8e039b066ff86e0d1b7291f71cefa7264c70404a8e538b566c17ccc5feab231401e6c08a01bd5edfc1aa8e3e533b96e82d1f91118d508924b923531929aea889fcdf050597c681185f336b1da63b0939aa2b7c50b21b5eb7b6ad66c81fab98a3cdf73f658149e7e9ced4edde5d38c9b8f92e16f6b4ab13d7fca6a0e4ecc9f9de611a90da6e99c39551094c56e3196f282c5dffd9fc4b2fc12f3bca8e6fe47eb45fbdd3be21a8a8d200797eae3c9a0497132f92410d804977408494dff49dd3d8bce248e0b74fd9e6f0f7102c25ddfa02bd9ad9f746abbfa337ef811d5345a9e16b60de1767b209645ba40bd1f9a5f75bc04feca9b27c5554be4fe83fac2cb83aa447a817bb85ae966c68b420063833fada375e2f515965e687a45699632902672c654d1d18d7bcbf55e8fa57f63f2da449f8e1e606e8722df081e5f193fc4179feb99ad22819afdeef211f7c54afdba92aeef0c00b7bc2b65a4813c01f907a8377585708f2d4c940a25328e585714c8ded0a9a4d7a6de1027c1cb7a0198cd3db68b58c0704dfd0cfbe624e9cd18cc0ae5d96697bb476708b9ee0403d211e64e0d5a7683a7a9a140c02f0ff1c6e67a302941b4052bdea8a63e70a3ad62c5b89c698f1fd3c7685cb49705096cad702d02d93bcb1c27a409f4c9bddec001205ca4a2740f19b50900be81c7e847f1a863deea8d35701f1355cad8db57b1d4eb2ab4e29587734785abfb46ddede71928213d7d089dfdeda052827f459f1688cc0935bd47e7bcec27427c8376dcce7e22699567c0d145f8a7db33f6758815f1f15f9f7a9760dec4f34ae095edda4c64e9735bdd029c4e32c2ee31ba47ec5e6bdb97813d52dbd15b4e0b7a2c7f790ae64104d99f38c127
f0a093288fa34144adb16b8968d4fa7656fcec99de8503dd46d3b03620a71c7cd085364abd30dccf7fbda25a1cdc102600149c9af1c97aa0372cd2e1909f28ac5c686f432b310e79528c9b8b9e8f314c1e74621ce6308ad2278b81d460892e0d9dd38b7c76d58be6dfd10ae7583ee1e7ef5b3f6f78dc60af0950df1b00cc55b6d178ba2e476bea0eaeef49323b83f05804159e7aef4eed4cc60dd07be76f067dfd0bcfb0b806b69ba921336a20c43c832d0cab8fa3ddeb29e3bf07b0d98a112eb07802756235a49d44a8b82a950d84e95e01971f0e106ccb337f07384e21620e0ad39e16ed9edca123226cf55ac44f449eeb53e38a7f27d101806e4823e4efcc887414240ee6826c4a5cb1c6443ad36ebf905a435c1d9054e54173911b17b5b40f60b3d9fd5f12eac54ca1e20191f5f18544d5fd3d665e9bcef96fb44b76110aa64d9db4c86c9513cbdad546538e8aec521fbe83ceac5e74a15629f1ed0b870a1d0d1e5680b6d6100d1bd3f3b9043bd35b8919c4088f1949b8be89e4701eb870f8ed64fafa446c78df3ea").unwrap());
index 785edecebbe3d0b7eddb0f0f6de2011f895fdde1..cc8f4ee27e8372922109e7e4268f244aba764a7d 100644 (file)
@@ -28,7 +28,8 @@ use util::enforcing_trait_impls::EnforcingSigner;
 use util::ser::{ReadableArgs, Writeable};
 use io;
 
-use bitcoin::{Block, BlockHeader, BlockHash};
+use bitcoin::{Block, BlockHeader, BlockHash, TxMerkleNode};
+use bitcoin::hashes::Hash;
 use bitcoin::network::constants::Network;
 
 use prelude::*;
@@ -605,7 +606,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
 
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
index d34fdfeb52a1ddcb887bfd42c4cfcddac8e7d0e9..f80c8984c1cb0437831e70510c6d9fa0e4e7e0fe 100644 (file)
@@ -19,7 +19,7 @@ use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
 
 use ln::features::InitFeatures;
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, RoutingMessageHandler};
+use ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler};
 use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use util::ser::{VecWriter, Writeable, Writer};
 use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
@@ -27,7 +27,7 @@ use ln::wire;
 use ln::wire::Encode;
 use routing::gossip::{NetworkGraph, P2PGossipSync};
 use util::atomic_counter::AtomicCounter;
-use util::events::{MessageSendEvent, MessageSendEventsProvider};
+use util::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
 use util::logger::Logger;
 
 use prelude::*;
@@ -67,15 +67,23 @@ impl RoutingMessageHandler for IgnoringMessageHandler {
        fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
        fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
        fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
-       fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) ->
-               Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { Vec::new() }
-       fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> { Vec::new() }
+       fn get_next_channel_announcement(&self, _starting_point: u64) ->
+               Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None }
+       fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<msgs::NodeAnnouncement> { None }
        fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
        fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
        fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
        fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
        fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
 }
+impl OnionMessageProvider for IgnoringMessageHandler {
+       fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<msgs::OnionMessage> { None }
+}
+impl OnionMessageHandler for IgnoringMessageHandler {
+       fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
+       fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+}
 impl Deref for IgnoringMessageHandler {
        type Target = IgnoringMessageHandler;
        fn deref(&self) -> &Self { self }
@@ -199,9 +207,11 @@ impl Deref for ErroringMessageHandler {
 }
 
 /// Provides references to trait impls which handle different types of messages.
-pub struct MessageHandler<CM: Deref, RM: Deref> where
+pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref> where
                CM::Target: ChannelMessageHandler,
-               RM::Target: RoutingMessageHandler {
+               RM::Target: RoutingMessageHandler,
+               OM::Target: OnionMessageHandler,
+{
        /// A message handler which handles messages specific to channels. Usually this is just a
        /// [`ChannelManager`] object or an [`ErroringMessageHandler`].
        ///
@@ -212,6 +222,10 @@ pub struct MessageHandler<CM: Deref, RM: Deref> where
        ///
        /// [`P2PGossipSync`]: crate::routing::gossip::P2PGossipSync
        pub route_handler: RM,
+
+       /// A message handler which handles onion messages. For now, this can only be an
+       /// [`IgnoringMessageHandler`].
+       pub onion_message_handler: OM,
 }
 
 /// Provides an object which can be used to send data to and which uniquely identifies a connection
@@ -298,7 +312,7 @@ const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
 /// we have fewer than this many messages in the outbound buffer again.
 /// We also use this as the target number of outbound gossip messages to keep in the write buffer,
 /// refilled as we send bytes.
-const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
+const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 12;
 /// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to
 /// the peer.
 const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
@@ -323,6 +337,10 @@ const MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER: i8 = 4;
 /// tick. Once we have sent this many messages since the last ping, we send a ping right away to
 /// ensures we don't just fill up our send buffer and leave the peer with too many messages to
 /// process before the next ping.
+///
+/// Note that we continue responding to other messages even after we've sent this many messages, so
+/// it's more of a general guideline used for gossip backfill (and gossip forwarding, times
+/// [`FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO`]) than a hard limit.
 const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;
 
 struct Peer {
@@ -333,6 +351,9 @@ struct Peer {
 
        pending_outbound_buffer: LinkedList<Vec<u8>>,
        pending_outbound_buffer_first_msg_offset: usize,
+       // Queue gossip broadcasts separately from `pending_outbound_buffer` so we can easily prioritize
+       // channel messages over them.
+       gossip_broadcast_buffer: LinkedList<Vec<u8>>,
        awaiting_write_event: bool,
 
        pending_read_buffer: Vec<u8>,
@@ -378,6 +399,42 @@ impl Peer {
                        InitSyncTracker::NodesSyncing(pk) => pk < node_id,
                }
        }
+
+       /// Returns whether we should be reading bytes from this peer, based on whether its outbound
+       /// buffer still has space and we don't need to pause reads to get some writes out.
+       fn should_read(&self) -> bool {
+               self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE
+       }
+
+       /// Determines if we should push additional gossip background sync (aka "backfill") onto a peer's
+       /// outbound buffer. This is checked every time the peer's buffer may have been drained.
+       fn should_buffer_gossip_backfill(&self) -> bool {
+               self.pending_outbound_buffer.is_empty() && self.gossip_broadcast_buffer.is_empty()
+                       && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+       }
+
+       /// Determines if we should push an onion message onto a peer's outbound buffer. This is checked
+       /// every time the peer's buffer may have been drained.
+       fn should_buffer_onion_message(&self) -> bool {
+               self.pending_outbound_buffer.is_empty()
+                       && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+       }
+
+       /// Determines if we should push additional gossip broadcast messages onto a peer's outbound
+       /// buffer. This is checked every time the peer's buffer may have been drained.
+       fn should_buffer_gossip_broadcast(&self) -> bool {
+               self.pending_outbound_buffer.is_empty()
+                       && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+       }
+
+       /// Returns whether this peer's outbound buffers are full and we should drop gossip broadcasts.
+       fn buffer_full_drop_gossip_broadcast(&self) -> bool {
+               let total_outbound_buffered =
+                       self.gossip_broadcast_buffer.len() + self.pending_outbound_buffer.len();
+
+               total_outbound_buffered > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP ||
+                       self.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
+       }
 }
 
 /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
@@ -387,7 +444,7 @@ impl Peer {
 /// issues such as overly long function definitions.
 ///
 /// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<L>>>, Arc<C>, Arc<L>>>, Arc<L>, Arc<IgnoringMessageHandler>>;
+pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<L>>>, Arc<C>, Arc<L>>>, IgnoringMessageHandler, Arc<L>, Arc<IgnoringMessageHandler>>;
 
 /// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
 /// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't
@@ -397,7 +454,7 @@ pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArc
 /// helps with issues such as long function definitions.
 ///
 /// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e P2PGossipSync<&'g NetworkGraph<&'f L>, &'h C, &'f L>, &'f L, IgnoringMessageHandler>;
+pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e P2PGossipSync<&'g NetworkGraph<&'f L>, &'h C, &'f L>, IgnoringMessageHandler, &'f L, IgnoringMessageHandler>;
 
 /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
 /// socket events into messages which it passes on to its [`MessageHandler`].
@@ -418,12 +475,13 @@ pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L>
 /// you're using lightning-net-tokio.
 ///
 /// [`read_event`]: PeerManager::read_event
-pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> where
+pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref> where
                CM::Target: ChannelMessageHandler,
                RM::Target: RoutingMessageHandler,
+               OM::Target: OnionMessageHandler,
                L::Target: Logger,
                CMH::Target: CustomMessageHandler {
-       message_handler: MessageHandler<CM, RM>,
+       message_handler: MessageHandler<CM, RM, OM>,
        /// Connection state for each connected peer - we have an outer read-write lock which is taken
        /// as read while we're doing processing for a peer and taken write when a peer is being added
        /// or removed.
@@ -482,31 +540,34 @@ macro_rules! encode_msg {
        }}
 }
 
-impl<Descriptor: SocketDescriptor, CM: Deref, L: Deref> PeerManager<Descriptor, CM, IgnoringMessageHandler, L, IgnoringMessageHandler> where
+impl<Descriptor: SocketDescriptor, CM: Deref, OM: Deref, L: Deref> PeerManager<Descriptor, CM, IgnoringMessageHandler, OM, L, IgnoringMessageHandler> where
                CM::Target: ChannelMessageHandler,
+               OM::Target: OnionMessageHandler,
                L::Target: Logger {
-       /// Constructs a new PeerManager with the given ChannelMessageHandler. No routing message
-       /// handler is used and network graph messages are ignored.
+       /// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and
+       /// `OnionMessageHandler`. No routing message handler is used and network graph messages are
+       /// ignored.
        ///
        /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
        /// cryptographically secure random bytes.
        ///
        /// (C-not exported) as we can't export a PeerManager with a dummy route handler
-       pub fn new_channel_only(channel_message_handler: CM, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
+       pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
                Self::new(MessageHandler {
                        chan_handler: channel_message_handler,
                        route_handler: IgnoringMessageHandler{},
+                       onion_message_handler,
                }, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{})
        }
 }
 
-impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref> PeerManager<Descriptor, ErroringMessageHandler, RM, L, IgnoringMessageHandler> where
+impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref> PeerManager<Descriptor, ErroringMessageHandler, RM, IgnoringMessageHandler, L, IgnoringMessageHandler> where
                RM::Target: RoutingMessageHandler,
                L::Target: Logger {
-       /// Constructs a new PeerManager with the given RoutingMessageHandler. No channel message
-       /// handler is used and messages related to channels will be ignored (or generate error
-       /// messages). Note that some other lightning implementations time-out connections after some
-       /// time if no channel is built with the peer.
+       /// Constructs a new `PeerManager` with the given `RoutingMessageHandler`. No channel message
+       /// handler or onion message handler is used and onion and channel messages will be ignored (or
+       /// generate error messages). Note that some other lightning implementations time-out connections
+       /// after some time if no channel is built with the peer.
        ///
        /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
        /// cryptographically secure random bytes.
@@ -516,6 +577,7 @@ impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref> PeerManager<Descriptor,
                Self::new(MessageHandler {
                        chan_handler: ErroringMessageHandler::new(),
                        route_handler: routing_message_handler,
+                       onion_message_handler: IgnoringMessageHandler{},
                }, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{})
        }
 }
@@ -561,15 +623,16 @@ fn filter_addresses(ip_address: Option<NetAddress>) -> Option<NetAddress> {
        }
 }
 
-impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> PeerManager<Descriptor, CM, RM, L, CMH> where
+impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH> where
                CM::Target: ChannelMessageHandler,
                RM::Target: RoutingMessageHandler,
+               OM::Target: OnionMessageHandler,
                L::Target: Logger,
                CMH::Target: CustomMessageHandler {
        /// Constructs a new PeerManager with the given message handlers and node_id secret key
        /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
        /// cryptographically secure random bytes.
-       pub fn new(message_handler: MessageHandler<CM, RM>, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self {
+       pub fn new(message_handler: MessageHandler<CM, RM, OM>, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self {
                let mut ephemeral_key_midstate = Sha256::engine();
                ephemeral_key_midstate.input(ephemeral_random_data);
 
@@ -644,6 +707,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 
                        pending_outbound_buffer: LinkedList::new(),
                        pending_outbound_buffer_first_msg_offset: 0,
+                       gossip_broadcast_buffer: LinkedList::new(),
                        awaiting_write_event: false,
 
                        pending_read_buffer,
@@ -690,6 +754,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 
                        pending_outbound_buffer: LinkedList::new(),
                        pending_outbound_buffer_first_msg_offset: 0,
+                       gossip_broadcast_buffer: LinkedList::new(),
                        awaiting_write_event: false,
 
                        pending_read_buffer,
@@ -710,46 +775,52 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 
        fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) {
                while !peer.awaiting_write_event {
-                       if peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE && peer.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK {
+                       if peer.should_buffer_onion_message() {
+                               if let Some(peer_node_id) = peer.their_node_id {
+                                       if let Some(next_onion_message) =
+                                               self.message_handler.onion_message_handler.next_onion_message_for_peer(peer_node_id) {
+                                                       self.enqueue_message(peer, &next_onion_message);
+                                       }
+                               }
+                       }
+                       if peer.should_buffer_gossip_broadcast() {
+                               if let Some(msg) = peer.gossip_broadcast_buffer.pop_front() {
+                                       peer.pending_outbound_buffer.push_back(msg);
+                               }
+                       }
+                       if peer.should_buffer_gossip_backfill() {
                                match peer.sync_status {
                                        InitSyncTracker::NoSyncRequested => {},
                                        InitSyncTracker::ChannelsSyncing(c) if c < 0xffff_ffff_ffff_ffff => {
-                                               let steps = ((OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len() + 2) / 3) as u8;
-                                               let all_messages = self.message_handler.route_handler.get_next_channel_announcements(c, steps);
-                                               for &(ref announce, ref update_a_option, ref update_b_option) in all_messages.iter() {
-                                                       self.enqueue_message(peer, announce);
-                                                       if let &Some(ref update_a) = update_a_option {
-                                                               self.enqueue_message(peer, update_a);
+                                               if let Some((announce, update_a_option, update_b_option)) =
+                                                       self.message_handler.route_handler.get_next_channel_announcement(c)
+                                               {
+                                                       self.enqueue_message(peer, &announce);
+                                                       if let Some(update_a) = update_a_option {
+                                                               self.enqueue_message(peer, &update_a);
                                                        }
-                                                       if let &Some(ref update_b) = update_b_option {
-                                                               self.enqueue_message(peer, update_b);
+                                                       if let Some(update_b) = update_b_option {
+                                                               self.enqueue_message(peer, &update_b);
                                                        }
                                                        peer.sync_status = InitSyncTracker::ChannelsSyncing(announce.contents.short_channel_id + 1);
-                                               }
-                                               if all_messages.is_empty() || all_messages.len() != steps as usize {
+                                               } else {
                                                        peer.sync_status = InitSyncTracker::ChannelsSyncing(0xffff_ffff_ffff_ffff);
                                                }
                                        },
                                        InitSyncTracker::ChannelsSyncing(c) if c == 0xffff_ffff_ffff_ffff => {
-                                               let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
-                                               let all_messages = self.message_handler.route_handler.get_next_node_announcements(None, steps);
-                                               for msg in all_messages.iter() {
-                                                       self.enqueue_message(peer, msg);
+                                               if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(None) {
+                                                       self.enqueue_message(peer, &msg);
                                                        peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
-                                               }
-                                               if all_messages.is_empty() || all_messages.len() != steps as usize {
+                                               } else {
                                                        peer.sync_status = InitSyncTracker::NoSyncRequested;
                                                }
                                        },
                                        InitSyncTracker::ChannelsSyncing(_) => unreachable!(),
                                        InitSyncTracker::NodesSyncing(key) => {
-                                               let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
-                                               let all_messages = self.message_handler.route_handler.get_next_node_announcements(Some(&key), steps);
-                                               for msg in all_messages.iter() {
-                                                       self.enqueue_message(peer, msg);
+                                               if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(Some(&key)) {
+                                                       self.enqueue_message(peer, &msg);
                                                        peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
-                                               }
-                                               if all_messages.is_empty() || all_messages.len() != steps as usize {
+                                               } else {
                                                        peer.sync_status = InitSyncTracker::NoSyncRequested;
                                                }
                                        },
@@ -759,18 +830,15 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                self.maybe_send_extra_ping(peer);
                        }
 
-                       if {
-                               let next_buff = match peer.pending_outbound_buffer.front() {
-                                       None => return,
-                                       Some(buff) => buff,
-                               };
+                       let next_buff = match peer.pending_outbound_buffer.front() {
+                               None => return,
+                               Some(buff) => buff,
+                       };
 
-                               let should_be_reading = peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE;
-                               let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
-                               let data_sent = descriptor.send_data(pending, should_be_reading);
-                               peer.pending_outbound_buffer_first_msg_offset += data_sent;
-                               if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() { true } else { false }
-                       } {
+                       let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
+                       let data_sent = descriptor.send_data(pending, peer.should_read());
+                       peer.pending_outbound_buffer_first_msg_offset += data_sent;
+                       if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() {
                                peer.pending_outbound_buffer_first_msg_offset = 0;
                                peer.pending_outbound_buffer.pop_front();
                        } else {
@@ -834,12 +902,6 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                }
        }
 
-       /// Append a message to a peer's pending outbound/write buffer
-       fn enqueue_encoded_message(&self, peer: &mut Peer, encoded_message: &Vec<u8>) {
-               peer.msgs_sent_since_pong += 1;
-               peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..]));
-       }
-
        /// Append a message to a peer's pending outbound/write buffer
        fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
                let mut buffer = VecWriter(Vec::with_capacity(2048));
@@ -850,7 +912,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                } else {
                        log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()))
                }
-               self.enqueue_encoded_message(peer, &buffer.0);
+               peer.msgs_sent_since_pong += 1;
+               peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&buffer.0[..]));
+       }
+
+       /// Append a message to a peer's pending outbound/write gossip broadcast buffer
+       fn enqueue_encoded_gossip_broadcast(&self, peer: &mut Peer, encoded_message: &Vec<u8>) {
+               peer.msgs_sent_since_pong += 1;
+               peer.gossip_broadcast_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..]));
        }
 
        fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result<bool, PeerHandleError> {
@@ -1045,7 +1114,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                        }
                                                }
                                        }
-                                       pause_read = peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_READ_PAUSE;
+                                       pause_read = !peer.should_read();
 
                                        if let Some(message) = msg_to_handle {
                                                match self.handle_message(&peer_mutex, peer_lock, message) {
@@ -1106,8 +1175,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                        }
 
                        self.message_handler.route_handler.peer_connected(&their_node_id, &msg);
-
                        self.message_handler.chan_handler.peer_connected(&their_node_id, &msg);
+                       self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg);
+
                        peer_lock.their_features = Some(msg.features);
                        return Ok(None);
                } else if peer_lock.their_features.is_none() {
@@ -1280,6 +1350,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                self.message_handler.route_handler.handle_reply_channel_range(&their_node_id, msg)?;
                        },
 
+                       // Onion message:
+                       wire::Message::OnionMessage(msg) => {
+                               self.message_handler.onion_message_handler.handle_onion_message(&their_node_id, &msg);
+                       },
+
                        // Unknown messages:
                        wire::Message::Unknown(type_id) if message.is_even() => {
                                log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
@@ -1308,9 +1383,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
                                                continue
                                        }
-                                       if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
-                                               || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
-                                       {
+                                       if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
@@ -1321,7 +1394,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                        if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
                                                continue;
                                        }
-                                       self.enqueue_encoded_message(&mut *peer, &encoded_msg);
+                                       self.enqueue_encoded_gossip_broadcast(&mut *peer, &encoded_msg);
                                }
                        },
                        wire::Message::NodeAnnouncement(ref msg) => {
@@ -1334,9 +1407,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                        !peer.should_forward_node_announcement(msg.contents.node_id) {
                                                continue
                                        }
-                                       if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
-                                               || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
-                                       {
+                                       if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
@@ -1346,7 +1417,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                        if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
                                                continue;
                                        }
-                                       self.enqueue_encoded_message(&mut *peer, &encoded_msg);
+                                       self.enqueue_encoded_gossip_broadcast(&mut *peer, &encoded_msg);
                                }
                        },
                        wire::Message::ChannelUpdate(ref msg) => {
@@ -1359,16 +1430,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id)  {
                                                continue
                                        }
-                                       if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
-                                               || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
-                                       {
+                                       if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
                                        if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
                                                continue;
                                        }
-                                       self.enqueue_encoded_message(&mut *peer, &encoded_msg);
+                                       self.enqueue_encoded_gossip_broadcast(&mut *peer, &encoded_msg);
                                }
                        },
                        _ => debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages"),
@@ -1663,6 +1732,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                        }
                                        descriptor.disconnect_socket();
                                        self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
                                }
                        }
                }
@@ -1690,6 +1760,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
                                        self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
                                        self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
+                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
                                }
                        }
                };
@@ -1710,6 +1781,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                        log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id);
                        peers_lock.remove(&descriptor);
                        self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
+                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
                        descriptor.disconnect_socket();
                }
        }
@@ -1725,6 +1797,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                        if let Some(node_id) = peer.lock().unwrap().their_node_id {
                                log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id);
                                self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+                               self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
                        }
                        descriptor.disconnect_socket();
                }
@@ -1815,6 +1888,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                        log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
                                                        self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
                                                        self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+                                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
                                                }
                                        }
                                }
@@ -1902,12 +1976,12 @@ mod tests {
                cfgs
        }
 
-       fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>> {
+       fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>> {
                let mut peers = Vec::new();
                for i in 0..peer_count {
                        let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap();
                        let ephemeral_bytes = [i as u8; 32];
-                       let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler };
+                       let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler, onion_message_handler: IgnoringMessageHandler {} };
                        let peer = PeerManager::new(msg_handler, node_secret, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {});
                        peers.push(peer);
                }
@@ -1915,7 +1989,7 @@ mod tests {
                peers
        }
 
-       fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>) -> (FileDescriptor, FileDescriptor) {
+       fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>) -> (FileDescriptor, FileDescriptor) {
                let secp_ctx = Secp256k1::new();
                let a_id = PublicKey::from_secret_key(&secp_ctx, &peer_a.our_node_secret);
                let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
@@ -2060,10 +2134,10 @@ mod tests {
 
                // Check that each peer has received the expected number of channel updates and channel
                // announcements.
-               assert_eq!(cfgs[0].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
-               assert_eq!(cfgs[0].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
-               assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
-               assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
+               assert_eq!(cfgs[0].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 108);
+               assert_eq!(cfgs[0].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 54);
+               assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 108);
+               assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 54);
        }
 
        #[test]
index fab6962e514957209b832c786916575be95fed4c..e4b916c9345d59c7aecd92ab536541a042534ad3 100644 (file)
@@ -28,6 +28,8 @@ use bitcoin::secp256k1::Secp256k1;
 
 use prelude::*;
 use core::mem;
+use bitcoin::hashes::Hash;
+use bitcoin::TxMerkleNode;
 
 use ln::functional_test_utils::*;
 
@@ -68,7 +70,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        check_added_monitors!(nodes[2], 1);
        get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 
-       let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        let claim_txn = if local_commitment {
                // Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
                let node_1_commitment_txn = get_local_commitment_txn!(nodes[1], chan_2.2);
@@ -128,10 +130,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
 
        if claim {
-               disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+               // Disconnect Node 1's HTLC-Timeout which was connected above
+               disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
                let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                        txdata: claim_txn,
                };
                connect_block(&nodes[1], &block);
@@ -143,7 +146,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        } else {
                // Confirm the timeout tx and check that we fail the HTLC backwards
                let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                        txdata: vec![],
                };
                connect_block(&nodes[1], &block);
index 0e25f46d472141fb2f1e7a2f58ada2bbabc25e15..595085114b0cdec9ee5e2988fa869f5a264eef35 100644 (file)
@@ -122,7 +122,7 @@ pub(crate) fn is_bolt2_compliant(script: &Script, features: &InitFeatures) -> bo
        if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wpkh() || script.is_v0_p2wsh() {
                true
        } else if features.supports_shutdown_anysegwit() {
-               script.is_witness_program() && script.as_bytes()[0] != SEGWIT_V0.into_u8()
+               script.is_witness_program() && script.as_bytes()[0] != SEGWIT_V0.to_u8()
        } else {
                false
        }
index cbf5c77d60095d73abe235c7652eeccb8380cf04..1191a8d3d531477977cad80776bb29f6c49ed2c8 100644 (file)
@@ -9,7 +9,7 @@
 
 //! Wire encoding/decoding for Lightning messages according to [BOLT #1], and for
 //! custom message through the [`CustomMessageReader`] trait.
-//! 
+//!
 //! [BOLT #1]: https://github.com/lightning/bolts/blob/master/01-messaging.md
 
 use io;
@@ -60,6 +60,7 @@ pub(crate) enum Message<T> where T: core::fmt::Debug + Type + TestEq {
        ChannelReady(msgs::ChannelReady),
        Shutdown(msgs::Shutdown),
        ClosingSigned(msgs::ClosingSigned),
+       OnionMessage(msgs::OnionMessage),
        UpdateAddHTLC(msgs::UpdateAddHTLC),
        UpdateFulfillHTLC(msgs::UpdateFulfillHTLC),
        UpdateFailHTLC(msgs::UpdateFailHTLC),
@@ -100,6 +101,7 @@ impl<T> Message<T> where T: core::fmt::Debug + Type + TestEq {
                        &Message::ChannelReady(ref msg) => msg.type_id(),
                        &Message::Shutdown(ref msg) => msg.type_id(),
                        &Message::ClosingSigned(ref msg) => msg.type_id(),
+                       &Message::OnionMessage(ref msg) => msg.type_id(),
                        &Message::UpdateAddHTLC(ref msg) => msg.type_id(),
                        &Message::UpdateFulfillHTLC(ref msg) => msg.type_id(),
                        &Message::UpdateFailHTLC(ref msg) => msg.type_id(),
@@ -185,6 +187,9 @@ fn do_read<R: io::Read, T, H: core::ops::Deref>(buffer: &mut R, message_type: u1
                msgs::ClosingSigned::TYPE => {
                        Ok(Message::ClosingSigned(Readable::read(buffer)?))
                },
+               msgs::OnionMessage::TYPE => {
+                       Ok(Message::OnionMessage(Readable::read(buffer)?))
+               },
                msgs::UpdateAddHTLC::TYPE => {
                        Ok(Message::UpdateAddHTLC(Readable::read(buffer)?))
                },
@@ -344,6 +349,10 @@ impl Encode for msgs::ClosingSigned {
        const TYPE: u16 = 39;
 }
 
+impl Encode for msgs::OnionMessage {
+       const TYPE: u16 = 513;
+}
+
 impl Encode for msgs::UpdateAddHTLC {
        const TYPE: u16 = 128;
 }
diff --git a/lightning/src/onion_message/blinded_route.rs b/lightning/src/onion_message/blinded_route.rs
new file mode 100644 (file)
index 0000000..9f1d8db
--- /dev/null
@@ -0,0 +1,188 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Creating blinded routes and related utilities live here.
+
+use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
+
+use chain::keysinterface::{KeysInterface, Sign};
+use super::utils;
+use ln::msgs::DecodeError;
+use util::chacha20poly1305rfc::ChaChaPolyWriteAdapter;
+use util::ser::{Readable, VecWriter, Writeable, Writer};
+
+use io;
+use prelude::*;
+
+/// Onion messages can be sent and received to blinded routes, which serve to hide the identity of
+/// the recipient.
+pub struct BlindedRoute {
+       /// To send to a blinded route, the sender first finds a route to the unblinded
+       /// `introduction_node_id`, which can unblind its [`encrypted_payload`] to find out the onion
+       /// message's next hop and forward it along.
+       ///
+       /// [`encrypted_payload`]: BlindedHop::encrypted_payload
+       pub(super) introduction_node_id: PublicKey,
+       /// Used by the introduction node to decrypt its [`encrypted_payload`] to forward the onion
+       /// message.
+       ///
+       /// [`encrypted_payload`]: BlindedHop::encrypted_payload
+       pub(super) blinding_point: PublicKey,
+       /// The hops composing the blinded route.
+       pub(super) blinded_hops: Vec<BlindedHop>,
+}
+
+/// Used to construct the blinded hops portion of a blinded route. These hops cannot be identified
+/// by outside observers and thus can be used to hide the identity of the recipient.
+pub struct BlindedHop {
+       /// The blinded node id of this hop in a blinded route.
+       pub(super) blinded_node_id: PublicKey,
+       /// The encrypted payload intended for this hop in a blinded route.
+       // The node sending to this blinded route will later encode this payload into the onion packet for
+       // this hop.
+       pub(super) encrypted_payload: Vec<u8>,
+}
+
+impl BlindedRoute {
+       /// Create a blinded route to be forwarded along `node_pks`. The last node pubkey in `node_pks`
+       /// will be the destination node.
+       ///
+       /// Errors if less than two hops are provided or if `node_pk`(s) are invalid.
+       //  TODO: make all payloads the same size with padding + add dummy hops
+       pub fn new<Signer: Sign, K: KeysInterface, T: secp256k1::Signing + secp256k1::Verification>
+               (node_pks: &[PublicKey], keys_manager: &K, secp_ctx: &Secp256k1<T>) -> Result<Self, ()>
+       {
+               if node_pks.len() < 2 { return Err(()) }
+               let blinding_secret_bytes = keys_manager.get_secure_random_bytes();
+               let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
+               let introduction_node_id = node_pks[0];
+
+               Ok(BlindedRoute {
+                       introduction_node_id,
+                       blinding_point: PublicKey::from_secret_key(secp_ctx, &blinding_secret),
+                       blinded_hops: blinded_hops(secp_ctx, node_pks, &blinding_secret).map_err(|_| ())?,
+               })
+       }
+}
+
+/// Construct blinded hops for the given `unblinded_path`.
+fn blinded_hops<T: secp256k1::Signing + secp256k1::Verification>(
+       secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], session_priv: &SecretKey
+) -> Result<Vec<BlindedHop>, secp256k1::Error> {
+       let mut blinded_hops = Vec::with_capacity(unblinded_path.len());
+
+       let mut prev_ss_and_blinded_node_id = None;
+       utils::construct_keys_callback(secp_ctx, unblinded_path, None, session_priv, |blinded_node_id, _, _, encrypted_payload_ss, unblinded_pk, _| {
+               if let Some((prev_ss, prev_blinded_node_id)) = prev_ss_and_blinded_node_id {
+                       if let Some(pk) = unblinded_pk {
+                               let payload = ForwardTlvs {
+                                       next_node_id: pk,
+                                       next_blinding_override: None,
+                               };
+                               blinded_hops.push(BlindedHop {
+                                       blinded_node_id: prev_blinded_node_id,
+                                       encrypted_payload: encrypt_payload(payload, prev_ss),
+                               });
+                       } else { debug_assert!(false); }
+               }
+               prev_ss_and_blinded_node_id = Some((encrypted_payload_ss, blinded_node_id));
+       })?;
+
+       if let Some((final_ss, final_blinded_node_id)) = prev_ss_and_blinded_node_id {
+               let final_payload = ReceiveTlvs { path_id: None };
+               blinded_hops.push(BlindedHop {
+                       blinded_node_id: final_blinded_node_id,
+                       encrypted_payload: encrypt_payload(final_payload, final_ss),
+               });
+       } else { debug_assert!(false) }
+
+       Ok(blinded_hops)
+}
+
+/// Encrypt TLV payload to be used as a [`BlindedHop::encrypted_payload`].
+fn encrypt_payload<P: Writeable>(payload: P, encrypted_tlvs_ss: [u8; 32]) -> Vec<u8> {
+       let mut writer = VecWriter(Vec::new());
+       let write_adapter = ChaChaPolyWriteAdapter::new(encrypted_tlvs_ss, &payload);
+       write_adapter.write(&mut writer).expect("In-memory writes cannot fail");
+       writer.0
+}
+
+impl Writeable for BlindedRoute {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               self.introduction_node_id.write(w)?;
+               self.blinding_point.write(w)?;
+               (self.blinded_hops.len() as u8).write(w)?;
+               for hop in &self.blinded_hops {
+                       hop.write(w)?;
+               }
+               Ok(())
+       }
+}
+
+impl Readable for BlindedRoute {
+       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let introduction_node_id = Readable::read(r)?;
+               let blinding_point = Readable::read(r)?;
+               let num_hops: u8 = Readable::read(r)?;
+               if num_hops == 0 { return Err(DecodeError::InvalidValue) }
+               let mut blinded_hops: Vec<BlindedHop> = Vec::with_capacity(num_hops.into());
+               for _ in 0..num_hops {
+                       blinded_hops.push(Readable::read(r)?);
+               }
+               Ok(BlindedRoute {
+                       introduction_node_id,
+                       blinding_point,
+                       blinded_hops,
+               })
+       }
+}
+
+impl_writeable!(BlindedHop, {
+       blinded_node_id,
+       encrypted_payload
+});
+
+/// TLVs to encode in an intermediate onion message packet's hop data. When provided in a blinded
+/// route, they are encoded into [`BlindedHop::encrypted_payload`].
+pub(crate) struct ForwardTlvs {
+       /// The node id of the next hop in the onion message's path.
+       pub(super) next_node_id: PublicKey,
+       /// Senders to a blinded route use this value to concatenate the route they find to the
+       /// introduction node with the blinded route.
+       pub(super) next_blinding_override: Option<PublicKey>,
+}
+
+/// Similar to [`ForwardTlvs`], but these TLVs are for the final node.
+pub(crate) struct ReceiveTlvs {
+       /// If `path_id` is `Some`, it is used to identify the blinded route that this onion message is
+       /// sending to. This is useful for receivers to check that said blinded route is being used in
+       /// the right context.
+       pub(super) path_id: Option<[u8; 32]>,
+}
+
+impl Writeable for ForwardTlvs {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               // TODO: write padding
+               encode_tlv_stream!(writer, {
+                       (4, self.next_node_id, required),
+                       (8, self.next_blinding_override, option)
+               });
+               Ok(())
+       }
+}
+
+impl Writeable for ReceiveTlvs {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               // TODO: write padding
+               encode_tlv_stream!(writer, {
+                       (6, self.path_id, option),
+               });
+               Ok(())
+       }
+}
diff --git a/lightning/src/onion_message/functional_tests.rs b/lightning/src/onion_message/functional_tests.rs
new file mode 100644 (file)
index 0000000..5fec5be
--- /dev/null
@@ -0,0 +1,182 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Onion message testing and test utilities live here.
+
+use chain::keysinterface::{KeysInterface, Recipient};
+use ln::features::InitFeatures;
+use ln::msgs::{self, OnionMessageHandler};
+use super::{BlindedRoute, Destination, OnionMessenger, SendError};
+use util::enforcing_trait_impls::EnforcingSigner;
+use util::test_utils;
+
+use bitcoin::network::constants::Network;
+use bitcoin::secp256k1::{PublicKey, Secp256k1};
+
+use sync::Arc;
+
+struct MessengerNode {
+       keys_manager: Arc<test_utils::TestKeysInterface>,
+       messenger: OnionMessenger<EnforcingSigner, Arc<test_utils::TestKeysInterface>, Arc<test_utils::TestLogger>>,
+       logger: Arc<test_utils::TestLogger>,
+}
+
+impl MessengerNode {
+       fn get_node_pk(&self) -> PublicKey {
+               let secp_ctx = Secp256k1::new();
+               PublicKey::from_secret_key(&secp_ctx, &self.keys_manager.get_node_secret(Recipient::Node).unwrap())
+       }
+}
+
+fn create_nodes(num_messengers: u8) -> Vec<MessengerNode> {
+       let mut nodes = Vec::new();
+       for i in 0..num_messengers {
+               let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
+               let seed = [i as u8; 32];
+               let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
+               nodes.push(MessengerNode {
+                       keys_manager: keys_manager.clone(),
+                       messenger: OnionMessenger::new(keys_manager, logger.clone()),
+                       logger,
+               });
+       }
+       for idx in 0..num_messengers - 1 {
+               let i = idx as usize;
+               let mut features = InitFeatures::known();
+               features.set_onion_messages_optional();
+               let init_msg = msgs::Init { features, remote_network_address: None };
+               nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone());
+               nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone());
+       }
+       nodes
+}
+
+fn pass_along_path(path: &Vec<MessengerNode>, expected_path_id: Option<[u8; 32]>) {
+       let mut prev_node = &path[0];
+       let num_nodes = path.len();
+       for (idx, node) in path.into_iter().skip(1).enumerate() {
+               let events = prev_node.messenger.release_pending_msgs();
+               let onion_msg =  {
+                       let msgs = events.get(&node.get_node_pk()).unwrap();
+                       assert_eq!(msgs.len(), 1);
+                       msgs[0].clone()
+               };
+               node.messenger.handle_onion_message(&prev_node.get_node_pk(), &onion_msg);
+               if idx == num_nodes - 1 {
+                       node.logger.assert_log_contains(
+                               "lightning::onion_message::messenger".to_string(),
+                               format!("Received an onion message with path_id: {:02x?}", expected_path_id).to_string(), 1);
+               }
+               prev_node = node;
+       }
+}
+
+#[test]
+fn one_hop() {
+       let nodes = create_nodes(2);
+
+       nodes[0].messenger.send_onion_message(&[], Destination::Node(nodes[1].get_node_pk()), None).unwrap();
+       pass_along_path(&nodes, None);
+}
+
+#[test]
+fn two_unblinded_hops() {
+       let nodes = create_nodes(3);
+
+       nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk()], Destination::Node(nodes[2].get_node_pk()), None).unwrap();
+       pass_along_path(&nodes, None);
+}
+
+#[test]
+fn two_unblinded_two_blinded() {
+       let nodes = create_nodes(5);
+
+       let secp_ctx = Secp256k1::new();
+       let blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[3].get_node_pk(), nodes[4].get_node_pk()], &*nodes[4].keys_manager, &secp_ctx).unwrap();
+
+       nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], Destination::BlindedRoute(blinded_route), None).unwrap();
+       pass_along_path(&nodes, None);
+}
+
+#[test]
+fn three_blinded_hops() {
+       let nodes = create_nodes(4);
+
+       let secp_ctx = Secp256k1::new();
+       let blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
+
+       nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), None).unwrap();
+       pass_along_path(&nodes, None);
+}
+
+#[test]
+fn too_big_packet_error() {
+       // Make sure we error as expected if a packet is too big to send.
+       let nodes = create_nodes(2);
+
+       let hop_node_id = nodes[1].get_node_pk();
+       let hops = [hop_node_id; 400];
+       let err = nodes[0].messenger.send_onion_message(&hops, Destination::Node(hop_node_id), None).unwrap_err();
+       assert_eq!(err, SendError::TooBigPacket);
+}
+
+#[test]
+fn invalid_blinded_route_error() {
+       // Make sure we error as expected if a provided blinded route has 0 or 1 hops.
+       let nodes = create_nodes(3);
+
+       // 0 hops
+       let secp_ctx = Secp256k1::new();
+       let mut blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], &*nodes[2].keys_manager, &secp_ctx).unwrap();
+       blinded_route.blinded_hops.clear();
+       let err = nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), None).unwrap_err();
+       assert_eq!(err, SendError::TooFewBlindedHops);
+
+       // 1 hop
+       let mut blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], &*nodes[2].keys_manager, &secp_ctx).unwrap();
+       blinded_route.blinded_hops.remove(0);
+       assert_eq!(blinded_route.blinded_hops.len(), 1);
+       let err = nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), None).unwrap_err();
+       assert_eq!(err, SendError::TooFewBlindedHops);
+}
+
+#[test]
+fn reply_path() {
+       let nodes = create_nodes(4);
+       let secp_ctx = Secp256k1::new();
+
+       // Destination::Node
+       let reply_path = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[2].get_node_pk(), nodes[1].get_node_pk(), nodes[0].get_node_pk()], &*nodes[0].keys_manager, &secp_ctx).unwrap();
+       nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], Destination::Node(nodes[3].get_node_pk()), Some(reply_path)).unwrap();
+       pass_along_path(&nodes, None);
+       // Make sure the last node successfully decoded the reply path.
+       nodes[3].logger.assert_log_contains(
+               "lightning::onion_message::messenger".to_string(),
+               format!("Received an onion message with path_id: None and reply_path").to_string(), 1);
+
+       // Destination::BlindedRoute
+       let blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
+       let reply_path = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[2].get_node_pk(), nodes[1].get_node_pk(), nodes[0].get_node_pk()], &*nodes[0].keys_manager, &secp_ctx).unwrap();
+
+       nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), Some(reply_path)).unwrap();
+       pass_along_path(&nodes, None);
+       nodes[3].logger.assert_log_contains(
+               "lightning::onion_message::messenger".to_string(),
+               format!("Received an onion message with path_id: None and reply_path").to_string(), 2);
+}
+
+#[test]
+fn peer_buffer_full() {
+       let nodes = create_nodes(2);
+       for _ in 0..188 { // Based on MAX_PER_PEER_BUFFER_SIZE in OnionMessenger
+               nodes[0].messenger.send_onion_message(&[], Destination::Node(nodes[1].get_node_pk()), None).unwrap();
+       }
+       let err = nodes[0].messenger.send_onion_message(&[], Destination::Node(nodes[1].get_node_pk()), None).unwrap_err();
+       assert_eq!(err, SendError::BufferFull);
+}
diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs
new file mode 100644 (file)
index 0000000..e38a0d1
--- /dev/null
@@ -0,0 +1,468 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
+//! more information.
+
+use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::hmac::{Hmac, HmacEngine};
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
+
+use chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
+use ln::msgs::{self, OnionMessageHandler};
+use ln::onion_utils;
+use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
+use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
+use super::utils;
+use util::events::OnionMessageProvider;
+use util::logger::Logger;
+use util::ser::Writeable;
+
+use core::ops::Deref;
+use sync::{Arc, Mutex};
+use prelude::*;
+
+/// A sender, receiver and forwarder of onion messages. In upcoming releases, this object will be
+/// used to retrieve invoices and fulfill invoice requests from [offers]. Currently, only sending
+/// and receiving empty onion messages is supported.
+///
+/// # Example
+///
+/// ```
+/// # extern crate bitcoin;
+/// # use bitcoin::hashes::_export::_core::time::Duration;
+/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+/// # use lightning::chain::keysinterface::{InMemorySigner, KeysManager, KeysInterface};
+/// # use lightning::onion_message::{BlindedRoute, Destination, OnionMessenger};
+/// # use lightning::util::logger::{Logger, Record};
+/// # use std::sync::Arc;
+/// # struct FakeLogger {};
+/// # impl Logger for FakeLogger {
+/// #     fn log(&self, record: &Record) { unimplemented!() }
+/// # }
+/// # let seed = [42u8; 32];
+/// # let time = Duration::from_secs(123456);
+/// # let keys_manager = KeysManager::new(&seed, time.as_secs(), time.subsec_nanos());
+/// # let logger = Arc::new(FakeLogger {});
+/// # let node_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
+/// # let secp_ctx = Secp256k1::new();
+/// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
+/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1,
+/// hop_node_id1);
+/// # let destination_node_id = hop_node_id1;
+/// #
+/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
+/// // ChannelManager.
+/// let onion_messenger = OnionMessenger::new(&keys_manager, logger);
+///
+/// // Send an empty onion message to a node id.
+/// let intermediate_hops = [hop_node_id1, hop_node_id2];
+/// let reply_path = None;
+/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id), reply_path);
+///
+/// // Create a blinded route to yourself, for someone to send an onion message to.
+/// # let your_node_id = hop_node_id1;
+/// let hops = [hop_node_id3, hop_node_id4, your_node_id];
+/// let blinded_route = BlindedRoute::new::<InMemorySigner, _, _>(&hops, &keys_manager, &secp_ctx).unwrap();
+///
+/// // Send an empty onion message to a blinded route.
+/// # let intermediate_hops = [hop_node_id1, hop_node_id2];
+/// let reply_path = None;
+/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route), reply_path);
+/// ```
+///
+/// [offers]: <https://github.com/lightning/bolts/pull/798>
+/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
+pub struct OnionMessenger<Signer: Sign, K: Deref, L: Deref>
+       where K::Target: KeysInterface<Signer = Signer>,
+             L::Target: Logger,
+{
+       keys_manager: K,
+       logger: L,
+       pending_messages: Mutex<HashMap<PublicKey, VecDeque<msgs::OnionMessage>>>,
+       secp_ctx: Secp256k1<secp256k1::All>,
+       // Coming soon:
+       // invoice_handler: InvoiceHandler,
+       // custom_handler: CustomHandler, // handles custom onion messages
+}
+
+/// The destination of an onion message.
+pub enum Destination {
+       /// We're sending this onion message to a node.
+       Node(PublicKey),
+       /// We're sending this onion message to a blinded route.
+       BlindedRoute(BlindedRoute),
+}
+
+impl Destination {
+       pub(super) fn num_hops(&self) -> usize {
+               match self {
+                       Destination::Node(_) => 1,
+                       Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => blinded_hops.len(),
+               }
+       }
+}
+
+/// Errors that may occur when [sending an onion message].
+///
+/// [sending an onion message]: OnionMessenger::send_onion_message
+#[derive(Debug, PartialEq)]
+pub enum SendError {
+       /// Errored computing onion message packet keys.
+       Secp256k1(secp256k1::Error),
+       /// Because implementations such as Eclair will drop onion messages where the message packet
+       /// exceeds 32834 bytes, we refuse to send messages where the packet exceeds this size.
+       TooBigPacket,
+       /// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
+       /// blinded hops.
+       TooFewBlindedHops,
+       /// Our next-hop peer was offline or does not support onion message forwarding.
+       InvalidFirstHop,
+       /// Our next-hop peer's buffer was full or our total outbound buffer was full.
+       BufferFull,
+}
+
+impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
+       where K::Target: KeysInterface<Signer = Signer>,
+             L::Target: Logger,
+{
+       /// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
+       /// their respective handlers.
+       pub fn new(keys_manager: K, logger: L) -> Self {
+               let mut secp_ctx = Secp256k1::new();
+               secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
+               OnionMessenger {
+                       keys_manager,
+                       pending_messages: Mutex::new(HashMap::new()),
+                       secp_ctx,
+                       logger,
+               }
+       }
+
+       /// Send an empty onion message to `destination`, routing it through `intermediate_nodes`.
+       /// See [`OnionMessenger`] for example usage.
+       pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
+               if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
+                       if blinded_hops.len() < 2 {
+                               return Err(SendError::TooFewBlindedHops);
+                       }
+               }
+               let blinding_secret_bytes = self.keys_manager.get_secure_random_bytes();
+               let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
+               let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
+                       (intermediate_nodes[0], PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret))
+               } else {
+                       match destination {
+                               Destination::Node(pk) => (pk, PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret)),
+                               Destination::BlindedRoute(BlindedRoute { introduction_node_id, blinding_point, .. }) =>
+                                       (introduction_node_id, blinding_point),
+                       }
+               };
+               let (packet_payloads, packet_keys) = packet_payloads_and_keys(
+                       &self.secp_ctx, intermediate_nodes, destination, reply_path, &blinding_secret)
+                       .map_err(|e| SendError::Secp256k1(e))?;
+
+               let prng_seed = self.keys_manager.get_secure_random_bytes();
+               let onion_routing_packet = construct_onion_message_packet(
+                       packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
+
+               let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
+               if outbound_buffer_full(&introduction_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
+               match pending_per_peer_msgs.entry(introduction_node_id) {
+                       hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
+                       hash_map::Entry::Occupied(mut e) => {
+                               e.get_mut().push_back(msgs::OnionMessage { blinding_point, onion_routing_packet });
+                               Ok(())
+                       }
+               }
+       }
+
+       #[cfg(test)]
+       pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<msgs::OnionMessage>> {
+               let mut pending_msgs = self.pending_messages.lock().unwrap();
+               let mut msgs = HashMap::new();
+               // We don't want to disconnect the peers by removing them entirely from the original map, so we
+               // swap the pending message buffers individually.
+               for (peer_node_id, pending_messages) in &mut *pending_msgs {
+                       msgs.insert(*peer_node_id, core::mem::take(pending_messages));
+               }
+               msgs
+       }
+}
+
+/// Returns whether enqueueing one more onion message for `peer_node_id` should be refused.
+/// Walks every buffered message, accumulating both the total serialized bytes across all peers
+/// and the bytes buffered for this specific peer, and returns true as soon as either limit is
+/// reached (bounding memory an unthrottled sender can make us hold).
+fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<msgs::OnionMessage>>) -> bool {
+       const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128; // 128 MiB across all peers
+       const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256; // 256 KiB for any single peer
+       let mut total_buffered_bytes = 0;
+       let mut peer_buffered_bytes = 0;
+       for (pk, peer_buf) in buffer {
+               for om in peer_buf {
+                       let om_len = om.serialized_length();
+                       if pk == peer_node_id {
+                               peer_buffered_bytes += om_len;
+                       }
+                       total_buffered_bytes += om_len;
+
+                       if total_buffered_bytes >= MAX_TOTAL_BUFFER_SIZE ||
+                               peer_buffered_bytes >= MAX_PER_PEER_BUFFER_SIZE
+                       {
+                               return true
+                       }
+               }
+       }
+       false
+}
+
+impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Signer, K, L>
+       where K::Target: KeysInterface<Signer = Signer>,
+             L::Target: Logger,
+{
+       /// Handle an incoming onion message. Currently, if a message was destined for us we will log, but
+       /// soon we'll delegate the onion message to a handler that can generate invoices or send
+       /// payments.
+       fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
+               // ECDH our node key with the message's blinding point: this secret decrypts the
+               // control TLVs for our hop.
+               let control_tlvs_ss = match self.keys_manager.ecdh(Recipient::Node, &msg.blinding_point, None) {
+                       Ok(ss) => ss,
+                       Err(e) =>  {
+                               log_error!(self.logger, "Failed to retrieve node secret: {:?}", e);
+                               return
+                       }
+               };
+               let onion_decode_ss = {
+                       // The onion packet is addressed to our *blinded* node id, so tweak our node key by
+                       // HMAC("blinded_node_id", control_tlvs_ss) before the onion-packet ECDH.
+                       let blinding_factor = {
+                               let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
+                               hmac.input(control_tlvs_ss.as_ref());
+                               Hmac::from_engine(hmac).into_inner()
+                       };
+                       match self.keys_manager.ecdh(Recipient::Node, &msg.onion_routing_packet.public_key,
+                               Some(&Scalar::from_be_bytes(blinding_factor).unwrap()))
+                       {
+                               Ok(ss) => ss.secret_bytes(),
+                               Err(()) => {
+                                       log_trace!(self.logger, "Failed to compute onion packet shared secret");
+                                       return
+                               }
+                       }
+               };
+               // Peel one onion layer: either this hop is the final recipient (Receive, no next hop) or
+               // an intermediate hop (Forward, with the next hop's HMAC and packet bytes).
+               match onion_utils::decode_next_hop(onion_decode_ss, &msg.onion_routing_packet.hop_data[..],
+                       msg.onion_routing_packet.hmac, control_tlvs_ss)
+               {
+                       Ok((Payload::Receive {
+                               control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id }), reply_path,
+                       }, None)) => {
+                               log_info!(self.logger,
+                                       "Received an onion message with path_id: {:02x?} and {}reply_path",
+                                               path_id, if reply_path.is_some() { "" } else { "no " });
+                       },
+                       Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
+                               next_node_id, next_blinding_override
+                       })), Some((next_hop_hmac, new_packet_bytes)))) => {
+                               // TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
+                               // blinded hop and this onion message is destined for us. In this situation, we should keep
+                               // unwrapping the onion layers to get to the final payload. Since we don't have the option
+                               // of creating blinded routes with dummy hops currently, we should be ok to not handle this
+                               // for now.
+                               let new_pubkey = match onion_utils::next_hop_packet_pubkey(&self.secp_ctx, msg.onion_routing_packet.public_key, &onion_decode_ss) {
+                                       Ok(pk) => pk,
+                                       Err(e) => {
+                                               log_trace!(self.logger, "Failed to compute next hop packet pubkey: {}", e);
+                                               return
+                                       }
+                               };
+                               let outgoing_packet = Packet {
+                                       version: 0,
+                                       public_key: new_pubkey,
+                                       hop_data: new_packet_bytes,
+                                       hmac: next_hop_hmac,
+                               };
+                               let onion_message = msgs::OnionMessage {
+                                       // The next hop needs a blinding point: use the route builder's override if one was
+                                       // provided, otherwise advance the current blinding point by one hop.
+                                       blinding_point: match next_blinding_override {
+                                               Some(blinding_point) => blinding_point,
+                                               None => {
+                                                       let blinding_factor = {
+                                                               let mut sha = Sha256::engine();
+                                                               sha.input(&msg.blinding_point.serialize()[..]);
+                                                               sha.input(control_tlvs_ss.as_ref());
+                                                               Sha256::from_engine(sha).into_inner()
+                                                       };
+                                                       let next_blinding_point = msg.blinding_point;
+                                                       match next_blinding_point.mul_tweak(&self.secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap()) {
+                                                               Ok(bp) => bp,
+                                                               Err(e) => {
+                                                                       log_trace!(self.logger, "Failed to compute next blinding point: {}", e);
+                                                                       return
+                                                               }
+                                                       }
+                                               },
+                                       },
+                                       onion_routing_packet: outgoing_packet,
+                               };
+
+                               let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
+                               if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
+                                       log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
+                                       return
+                               }
+
+                               // In fuzzing we may forward to peers which never sent us an Init, so create the
+                               // buffer on the fly to exercise the forwarding path.
+                               #[cfg(fuzzing)]
+                               pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
+
+                               // A missing buffer means the next hop isn't a connected, onion-message-supporting
+                               // peer (see peer_connected below), so the forward is silently dropped.
+                               match pending_per_peer_msgs.entry(next_node_id) {
+                                       hash_map::Entry::Vacant(_) => {
+                                               log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
+                                               return
+                                       },
+                                       hash_map::Entry::Occupied(mut e) => {
+                                               e.get_mut().push_back(onion_message);
+                                               log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
+                                       }
+                               };
+                       },
+                       Err(e) => {
+                               log_trace!(self.logger, "Errored decoding onion message packet: {:?}", e);
+                       },
+                       _ => {
+                               log_trace!(self.logger, "Received bogus onion message packet, either the sender encoded a final hop as a forwarding hop or vice versa");
+                       },
+               };
+       }
+
+       // Allocate an outbound buffer only for peers advertising onion message support; the buffer's
+       // presence doubles as our "can send to this peer" check.
+       fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) {
+               if init.features.supports_onion_messages() {
+                       let mut peers = self.pending_messages.lock().unwrap();
+                       peers.insert(their_node_id.clone(), VecDeque::new());
+               }
+       }
+
+       // Drop the peer's buffer (and any still-queued messages) on disconnect.
+       fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+               let mut pending_msgs = self.pending_messages.lock().unwrap();
+               pending_msgs.remove(their_node_id);
+       }
+}
+
+impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L>
+       where K::Target: KeysInterface<Signer = Signer>,
+             L::Target: Logger,
+{
+       /// Pops the next queued onion message for `peer_node_id`, if any. Returns `None` both when
+       /// the peer's buffer is empty and when we hold no buffer for the peer at all.
+       fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage> {
+               let mut pending_msgs = self.pending_messages.lock().unwrap();
+               if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
+                       return msgs.pop_front()
+               }
+               None
+       }
+}
+
+// TODO: parameterize the below Simple* types with OnionMessenger and handle the messages it
+// produces
+/// Useful for simplifying the parameters of [`SimpleArcChannelManager`] and
+/// [`SimpleArcPeerManager`]. See their docs for more details.
+///
+///[`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
+///[`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
+pub type SimpleArcOnionMessenger<L> = OnionMessenger<InMemorySigner, Arc<KeysManager>, Arc<L>>;
+/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
+/// [`SimpleRefPeerManager`]. See their docs for more details.
+///
+///[`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
+///[`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
+pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<InMemorySigner, &'a KeysManager, &'b L>;
+
+/// Construct onion packet payloads and keys for sending an onion message along the given
+/// `unblinded_path` to the given `destination`.
+///
+/// Returns one `(payload, control_tlvs_shared_secret)` pair per hop plus the onion keys used to
+/// encrypt the packet itself. `reply_path`, if provided, is attached to the final (Receive) hop's
+/// payload.
+fn packet_payloads_and_keys<T: secp256k1::Signing + secp256k1::Verification>(
+       secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination, mut reply_path:
+       Option<BlindedRoute>, session_priv: &SecretKey
+) -> Result<(Vec<(Payload, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
+       let num_hops = unblinded_path.len() + destination.num_hops();
+       let mut payloads = Vec::with_capacity(num_hops);
+       let mut onion_packet_keys = Vec::with_capacity(num_hops);
+
+       let (mut intro_node_id_blinding_pt, num_blinded_hops) = if let Destination::BlindedRoute(BlindedRoute {
+               introduction_node_id, blinding_point, blinded_hops }) = &destination {
+               (Some((*introduction_node_id, *blinding_point)), blinded_hops.len()) } else { (None, 0) };
+       let num_unblinded_hops = num_hops - num_blinded_hops;
+
+       let mut unblinded_path_idx = 0;
+       let mut blinded_path_idx = 0;
+       // Each unblinded hop's Forward payload names the *next* node, so we hold one shared secret
+       // back and emit the payload on the following callback invocation.
+       let mut prev_control_tlvs_ss = None;
+       utils::construct_keys_callback(secp_ctx, unblinded_path, Some(destination), session_priv, |_, onion_packet_ss, ephemeral_pubkey, control_tlvs_ss, unblinded_pk_opt, enc_payload_opt| {
+               if num_unblinded_hops != 0 && unblinded_path_idx < num_unblinded_hops {
+                       // Unblinded intermediate hop: flush the previous hop's Forward TLVs now that we
+                       // know its next_node_id, then hold this hop's shared secret for the next round.
+                       if let Some(ss) = prev_control_tlvs_ss.take() {
+                               payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(
+                                       ForwardTlvs {
+                                               next_node_id: unblinded_pk_opt.unwrap(),
+                                               next_blinding_override: None,
+                                       }
+                               )), ss));
+                       }
+                       prev_control_tlvs_ss = Some(control_tlvs_ss);
+                       unblinded_path_idx += 1;
+               } else if let Some((intro_node_id, blinding_pt)) = intro_node_id_blinding_pt.take() {
+                       // First hop of the destination's blinded route: the preceding unblinded hop must
+                       // switch the recipient over to the route's blinding point via next_blinding_override.
+                       if let Some(control_tlvs_ss) = prev_control_tlvs_ss.take() {
+                               payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
+                                       next_node_id: intro_node_id,
+                                       next_blinding_override: Some(blinding_pt),
+                               })), control_tlvs_ss));
+                       }
+                       if let Some(encrypted_payload) = enc_payload_opt {
+                               payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(encrypted_payload)),
+                                       control_tlvs_ss));
+                       } else { debug_assert!(false); }
+                       blinded_path_idx += 1;
+               } else if blinded_path_idx < num_blinded_hops - 1 && enc_payload_opt.is_some() {
+                       // Intermediate hop within the blinded route: its control TLVs arrive pre-encrypted
+                       // from the route builder.
+                       payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(enc_payload_opt.unwrap())),
+                               control_tlvs_ss));
+                       blinded_path_idx += 1;
+               } else if let Some(encrypted_payload) = enc_payload_opt {
+                       // Final hop of a blinded route: attach the reply path to the Receive payload.
+                       payloads.push((Payload::Receive {
+                               control_tlvs: ReceiveControlTlvs::Blinded(encrypted_payload),
+                               reply_path: reply_path.take(),
+                       }, control_tlvs_ss));
+               }
+
+               let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(onion_packet_ss.as_ref());
+               onion_packet_keys.push(onion_utils::OnionKeys {
+                       #[cfg(test)]
+                       shared_secret: onion_packet_ss,
+                       #[cfg(test)]
+                       blinding_factor: [0; 32],
+                       ephemeral_pubkey,
+                       rho,
+                       mu,
+               });
+       })?;
+
+       // If the destination was an unblinded node, its shared secret is still held back here and
+       // its Receive payload is emitted after the loop.
+       if let Some(control_tlvs_ss) = prev_control_tlvs_ss {
+               payloads.push((Payload::Receive {
+                       control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, }),
+                       reply_path: reply_path.take(),
+               }, control_tlvs_ss));
+       }
+
+       Ok((payloads, onion_packet_keys))
+}
+
+/// Builds the final onion message packet from the per-hop payloads and keys, padding `hop_data`
+/// up to the smaller of the two spec-allowed sizes that fits.
+///
+/// Errors if the serialized payload size exceeds onion_message::BIG_PACKET_HOP_DATA_LEN
+fn construct_onion_message_packet(payloads: Vec<(Payload, [u8; 32])>, onion_keys: Vec<onion_utils::OnionKeys>, prng_seed: [u8; 32]) -> Result<Packet, ()> {
+       // Spec rationale:
+       // "`len` allows larger messages to be sent than the standard 1300 bytes allowed for an HTLC
+       // onion, but this should be used sparingly as it is reduces anonymity set, hence the
+       // recommendation that it either look like an HTLC onion, or if larger, be a fixed size."
+       let payloads_ser_len = onion_utils::payloads_serialized_length(&payloads);
+       let hop_data_len = if payloads_ser_len <= SMALL_PACKET_HOP_DATA_LEN {
+               SMALL_PACKET_HOP_DATA_LEN
+       } else if payloads_ser_len <= BIG_PACKET_HOP_DATA_LEN {
+               BIG_PACKET_HOP_DATA_LEN
+       } else { return Err(()) };
+
+       Ok(onion_utils::construct_onion_message_packet::<_, _>(
+               payloads, onion_keys, prng_seed, hop_data_len))
+}
diff --git a/lightning/src/onion_message/mod.rs b/lightning/src/onion_message/mod.rs
new file mode 100644 (file)
index 0000000..2e23b76
--- /dev/null
@@ -0,0 +1,33 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Onion Messages: sending, receiving, forwarding, and ancillary utilities live here
+//!
+//! Onion messages are multi-purpose messages sent between peers over the lightning network. In the
+//! near future, they will be used to communicate invoices for [offers], unlocking use cases such as
+//! static invoices, refunds and proof of payer. Further, you will be able to accept payments
+//! without revealing your node id through the use of [blinded routes].
+//!
+//! LDK sends and receives onion messages via the [`OnionMessenger`]. See its documentation for more
+//! information on its usage.
+//!
+//! [offers]: <https://github.com/lightning/bolts/pull/798>
+//! [blinded routes]: crate::onion_message::BlindedRoute
+
+mod blinded_route;
+mod messenger;
+mod packet;
+mod utils;
+#[cfg(test)]
+mod functional_tests;
+
+// Re-export structs so they can be imported with just the `onion_message::` module prefix.
+pub use self::blinded_route::{BlindedRoute, BlindedHop};
+pub use self::messenger::{Destination, OnionMessenger, SendError, SimpleArcOnionMessenger, SimpleRefOnionMessenger};
+pub(crate) use self::packet::Packet;
diff --git a/lightning/src/onion_message/packet.rs b/lightning/src/onion_message/packet.rs
new file mode 100644 (file)
index 0000000..1337bdb
--- /dev/null
@@ -0,0 +1,253 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Structs and enums useful for constructing and reading an onion message packet.
+
+use bitcoin::secp256k1::PublicKey;
+use bitcoin::secp256k1::ecdh::SharedSecret;
+
+use ln::msgs::DecodeError;
+use ln::onion_utils;
+use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
+use util::chacha20poly1305rfc::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter};
+use util::ser::{BigSize, FixedLengthReader, LengthRead, LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer};
+
+use core::cmp;
+use io::{self, Read};
+use prelude::*;
+
+// Per the spec, an onion message packet's `hop_data` field length should be
+// SMALL_PACKET_HOP_DATA_LEN if it fits, else BIG_PACKET_HOP_DATA_LEN if it fits.
+pub(super) const SMALL_PACKET_HOP_DATA_LEN: usize = 1300;
+pub(super) const BIG_PACKET_HOP_DATA_LEN: usize = 32768;
+
+/// An onion message packet as sent over the wire: a version byte (currently always 0), the
+/// sender's ephemeral public key, variable-length onion-encrypted hop data, and a 32-byte HMAC.
+#[derive(Clone, Debug, PartialEq)]
+pub(crate) struct Packet {
+       pub(super) version: u8,
+       pub(super) public_key: PublicKey,
+       // Unlike the onion packets used for payments, onion message packets can have payloads greater
+       // than 1300 bytes.
+       // TODO: if 1300 ends up being the most common size, optimize this to be:
+       // enum { ThirteenHundred([u8; 1300]), VarLen(Vec<u8>) }
+       pub(super) hop_data: Vec<u8>,
+       pub(super) hmac: [u8; 32],
+}
+
+// Lets the generic onion-construction code in `onion_utils` build onion message packets; the
+// version byte is always 0.
+impl onion_utils::Packet for Packet {
+       type Data = Vec<u8>;
+       fn new(public_key: PublicKey, hop_data: Vec<u8>, hmac: [u8; 32]) -> Packet {
+               Self {
+                       version: 0,
+                       public_key,
+                       hop_data,
+                       hmac,
+               }
+       }
+}
+
+impl Writeable for Packet {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               self.version.write(w)?;
+               self.public_key.write(w)?;
+               // hop_data is written raw, with no length prefix -- its length is implied by the
+               // overall message length (see the LengthReadable impl).
+               w.write_all(&self.hop_data)?;
+               self.hmac.write(w)?;
+               Ok(())
+       }
+}
+
+// The hop_data length is not encoded on the wire; it is inferred from the total message length,
+// which is why this is LengthReadable rather than plain Readable.
+impl LengthReadable for Packet {
+       fn read<R: LengthRead>(r: &mut R) -> Result<Self, DecodeError> {
+               const READ_BUFFER_SIZE: usize = 4096;
+
+               let version = Readable::read(r)?;
+               let public_key = Readable::read(r)?;
+
+               let mut hop_data = Vec::new();
+               let hop_data_len = r.total_bytes().saturating_sub(66) as usize; // 1 (version) + 33 (pubkey) + 32 (HMAC) = 66
+               let mut read_idx = 0;
+               // Read in fixed-size chunks rather than preallocating hop_data_len bytes up front,
+               // since the length comes from the (untrusted) wire.
+               while read_idx < hop_data_len {
+                       let mut read_buffer = [0; READ_BUFFER_SIZE];
+                       let read_amt = cmp::min(hop_data_len - read_idx, READ_BUFFER_SIZE);
+                       r.read_exact(&mut read_buffer[..read_amt])?;
+                       hop_data.extend_from_slice(&read_buffer[..read_amt]);
+                       read_idx += read_amt;
+               }
+
+               let hmac = Readable::read(r)?;
+               Ok(Packet {
+                       version,
+                       public_key,
+                       hop_data,
+                       hmac,
+               })
+       }
+}
+
+/// Onion message payloads contain "control" TLVs and "data" TLVs. Control TLVs are used to route
+/// the onion message from hop to hop and for path verification, whereas data TLVs contain the onion
+/// message content itself, such as an invoice request.
+pub(super) enum Payload {
+       /// This payload is for an intermediate hop.
+       Forward(ForwardControlTlvs),
+       /// This payload is for the final hop.
+       Receive {
+               control_tlvs: ReceiveControlTlvs,
+               reply_path: Option<BlindedRoute>,
+               // Coming soon:
+               // message: Message,
+       }
+}
+
+// Coming soon:
+// enum Message {
+//     InvoiceRequest(InvoiceRequest),
+//     Invoice(Invoice),
+//     InvoiceError(InvoiceError),
+//     CustomMessage<T>,
+// }
+
+/// Forward control TLVs in their blinded and unblinded form.
+pub(super) enum ForwardControlTlvs {
+       /// If we're sending to a blinded route, the node that constructed the blinded route has provided
+       /// this hop's control TLVs, already encrypted into bytes.
+       Blinded(Vec<u8>),
+       /// If we're constructing an onion message hop through an intermediate unblinded node, we'll need
+       /// to construct the intermediate hop's control TLVs in their unblinded state to avoid encoding
+       /// them into an intermediate Vec. See [`super::blinded_route::ForwardTlvs`] for more info.
+       Unblinded(ForwardTlvs),
+}
+
+/// Receive control TLVs in their blinded and unblinded form.
+pub(super) enum ReceiveControlTlvs {
+       /// See [`ForwardControlTlvs::Blinded`].
+       Blinded(Vec<u8>),
+       /// See [`ForwardControlTlvs::Unblinded`] and [`super::blinded_route::ReceiveTlvs`].
+       Unblinded(ReceiveTlvs),
+}
+
+// Uses the provided secret to simultaneously encode and encrypt the unblinded control TLVs.
+// TLV type 2 carries the optional reply path; type 4 carries the (encrypted) control TLVs.
+// Blinded variants are already ciphertext and are written as-is; unblinded variants are
+// encrypted on the fly via ChaChaPolyWriteAdapter using the accompanying shared secret.
+impl Writeable for (Payload, [u8; 32]) {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               match &self.0 {
+                       Payload::Forward(ForwardControlTlvs::Blinded(encrypted_bytes)) => {
+                               encode_varint_length_prefixed_tlv!(w, {
+                                       (4, encrypted_bytes, vec_type)
+                               })
+                       },
+                       Payload::Receive {
+                               control_tlvs: ReceiveControlTlvs::Blinded(encrypted_bytes), reply_path
+                       } => {
+                               encode_varint_length_prefixed_tlv!(w, {
+                                       (2, reply_path, option),
+                                       (4, encrypted_bytes, vec_type)
+                               })
+                       },
+                       Payload::Forward(ForwardControlTlvs::Unblinded(control_tlvs)) => {
+                               let write_adapter = ChaChaPolyWriteAdapter::new(self.1, &control_tlvs);
+                               encode_varint_length_prefixed_tlv!(w, {
+                                       (4, write_adapter, required)
+                               })
+                       },
+                       Payload::Receive {
+                               control_tlvs: ReceiveControlTlvs::Unblinded(control_tlvs), reply_path,
+                       } => {
+                               let write_adapter = ChaChaPolyWriteAdapter::new(self.1, &control_tlvs);
+                               encode_varint_length_prefixed_tlv!(w, {
+                                       (2, reply_path, option),
+                                       (4, write_adapter, required)
+                               })
+                       },
+               }
+               Ok(())
+       }
+}
+
+// Uses the provided secret to simultaneously decode and decrypt the control TLVs.
+impl ReadableArgs<SharedSecret> for Payload {
+       fn read<R: Read>(r: &mut R, encrypted_tlvs_ss: SharedSecret) -> Result<Self, DecodeError> {
+               // Payloads are length-prefixed with a BigSize, so bound the TLV stream read to that length.
+               let v: BigSize = Readable::read(r)?;
+               let mut rd = FixedLengthReader::new(r, v.0);
+               let mut reply_path: Option<BlindedRoute> = None;
+               let mut read_adapter: Option<ChaChaPolyReadAdapter<ControlTlvs>> = None;
+               let rho = onion_utils::gen_rho_from_shared_secret(&encrypted_tlvs_ss.secret_bytes());
+               decode_tlv_stream!(&mut rd, {
+                       (2, reply_path, option),
+                       (4, read_adapter, (option: LengthReadableArgs, rho))
+               });
+               rd.eat_remaining().map_err(|_| DecodeError::ShortRead)?;
+
+               // Type 4 (the encrypted control TLVs) is mandatory; which ControlTlvs variant decrypted
+               // out of it determines whether this payload is a Forward or a Receive.
+               match read_adapter {
+                       None => return Err(DecodeError::InvalidValue),
+                       Some(ChaChaPolyReadAdapter { readable: ControlTlvs::Forward(tlvs)}) => {
+                               Ok(Payload::Forward(ForwardControlTlvs::Unblinded(tlvs)))
+                       },
+                       Some(ChaChaPolyReadAdapter { readable: ControlTlvs::Receive(tlvs)}) => {
+                               Ok(Payload::Receive { control_tlvs: ReceiveControlTlvs::Unblinded(tlvs), reply_path })
+                       },
+               }
+       }
+}
+
+/// When reading a packet off the wire, we don't know a priori whether the packet is to be forwarded
+/// or received. Thus we read a ControlTlvs rather than reading a ForwardControlTlvs or
+/// ReceiveControlTlvs directly.
+pub(super) enum ControlTlvs {
+       /// This onion message is intended to be forwarded.
+       Forward(ForwardTlvs),
+       /// This onion message is intended to be received.
+       Receive(ReceiveTlvs),
+}
+
+impl Readable for ControlTlvs {
+       fn read<R: Read>(mut r: &mut R) -> Result<Self, DecodeError> {
+               // Read every TLV that may appear, then decide forward-vs-receive from which fields
+               // are actually present. Padding and short_channel_id are parsed but ignored.
+               let mut _padding: Option<Padding> = None;
+               let mut _short_channel_id: Option<u64> = None;
+               let mut next_node_id: Option<PublicKey> = None;
+               let mut path_id: Option<[u8; 32]> = None;
+               let mut next_blinding_override: Option<PublicKey> = None;
+               decode_tlv_stream!(&mut r, {
+                       (1, _padding, option),
+                       (2, _short_channel_id, option),
+                       (4, next_node_id, option),
+                       (6, path_id, option),
+                       (8, next_blinding_override, option),
+               });
+
+               // A forward hop must name a next node and carry no path_id; a receive hop must carry
+               // neither a next node nor a blinding override. Any other combination is invalid.
+               let valid_fwd_fmt  = next_node_id.is_some() && path_id.is_none();
+               let valid_recv_fmt = next_node_id.is_none() && next_blinding_override.is_none();
+
+               let payload_fmt = if valid_fwd_fmt {
+                       ControlTlvs::Forward(ForwardTlvs {
+                               next_node_id: next_node_id.unwrap(),
+                               next_blinding_override,
+                       })
+               } else if valid_recv_fmt {
+                       ControlTlvs::Receive(ReceiveTlvs {
+                               path_id,
+                       })
+               } else {
+                       return Err(DecodeError::InvalidValue)
+               };
+
+               Ok(payload_fmt)
+       }
+}
+
+/// Reads padding to the end, ignoring what's read.
+pub(crate) struct Padding {}
+impl Readable for Padding {
+       #[inline]
+       fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               // Discard the padding in fixed 8 KiB chunks until the (length-bounded) reader is
+               // exhausted, avoiding an allocation proportional to the padding length.
+               loop {
+                       let mut buf = [0; 8192];
+                       if reader.read(&mut buf[..])? == 0 { break; }
+               }
+               Ok(Self {})
+       }
+}
diff --git a/lightning/src/onion_message/utils.rs b/lightning/src/onion_message/utils.rs
new file mode 100644 (file)
index 0000000..52cadf6
--- /dev/null
@@ -0,0 +1,98 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Onion message utility methods live here.
+
+use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::hmac::{Hmac, HmacEngine};
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey, Scalar};
+use bitcoin::secp256k1::ecdh::SharedSecret;
+
+use ln::onion_utils;
+use super::blinded_route::BlindedRoute;
+use super::messenger::Destination;
+
+use prelude::*;
+
+// TODO: DRY with onion_utils::construct_onion_keys_callback
+/// Walks the hops of an onion message route -- the unblinded intermediate hops followed by an
+/// optional destination, which may itself be a multi-hop blinded route -- deriving per-hop key
+/// material from `session_priv` and invoking `callback` once per hop with:
+/// (blinded_hop_pubkey, onion_packet_shared_secret, onion_packet_pubkey,
+///  control_tlvs_rho_key, unblinded_pubkey_if_unblinded, encrypted_payload_if_blinded).
+#[inline]
+pub(super) fn construct_keys_callback<T: secp256k1::Signing + secp256k1::Verification,
+       FType: FnMut(PublicKey, SharedSecret, PublicKey, [u8; 32], Option<PublicKey>, Option<Vec<u8>>)>(
+       secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Option<Destination>,
+       session_priv: &SecretKey, mut callback: FType
+) -> Result<(), secp256k1::Error> {
+       // Both the blinding-point chain and the onion ephemeral-key chain start from session_priv
+       // and are rolled forward independently per hop (see build_keys_in_loop).
+       let mut msg_blinding_point_priv = session_priv.clone();
+       let mut msg_blinding_point = PublicKey::from_secret_key(secp_ctx, &msg_blinding_point_priv);
+       let mut onion_packet_pubkey_priv = msg_blinding_point_priv.clone();
+       let mut onion_packet_pubkey = msg_blinding_point.clone();
+
+       // Derives one hop's shared secrets and invokes the callback; evaluates to the pair of
+       // shared secrets needed to advance the chains.
+       macro_rules! build_keys {
+               ($pk: expr, $blinded: expr, $encrypted_payload: expr) => {{
+                       let encrypted_data_ss = SharedSecret::new(&$pk, &msg_blinding_point_priv);
+
+                       // Hops from a blinded route arrive pre-blinded; unblinded hops are blinded here
+                       // with HMAC("blinded_node_id", encrypted_data_ss) as the tweak.
+                       let blinded_hop_pk = if $blinded { $pk } else {
+                               let hop_pk_blinding_factor = {
+                                       let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
+                                       hmac.input(encrypted_data_ss.as_ref());
+                                       Hmac::from_engine(hmac).into_inner()
+                               };
+                               $pk.mul_tweak(secp_ctx, &Scalar::from_be_bytes(hop_pk_blinding_factor).unwrap())?
+                       };
+                       let onion_packet_ss = SharedSecret::new(&blinded_hop_pk, &onion_packet_pubkey_priv);
+
+                       let rho = onion_utils::gen_rho_from_shared_secret(encrypted_data_ss.as_ref());
+                       let unblinded_pk_opt = if $blinded { None } else { Some($pk) };
+                       callback(blinded_hop_pk, onion_packet_ss, onion_packet_pubkey, rho, unblinded_pk_opt, $encrypted_payload);
+                       (encrypted_data_ss, onion_packet_ss)
+               }}
+       }
+
+       // Like build_keys, but additionally rolls the blinding point and the onion packet pubkey
+       // forward to the next hop, each tweaked by SHA256(pubkey || shared_secret).
+       macro_rules! build_keys_in_loop {
+               ($pk: expr, $blinded: expr, $encrypted_payload: expr) => {
+                       let (encrypted_data_ss, onion_packet_ss) = build_keys!($pk, $blinded, $encrypted_payload);
+
+                       let msg_blinding_point_blinding_factor = {
+                               let mut sha = Sha256::engine();
+                               sha.input(&msg_blinding_point.serialize()[..]);
+                               sha.input(encrypted_data_ss.as_ref());
+                               Sha256::from_engine(sha).into_inner()
+                       };
+
+                       msg_blinding_point_priv = msg_blinding_point_priv.mul_tweak(&Scalar::from_be_bytes(msg_blinding_point_blinding_factor).unwrap())?;
+                       msg_blinding_point = PublicKey::from_secret_key(secp_ctx, &msg_blinding_point_priv);
+
+                       let onion_packet_pubkey_blinding_factor = {
+                               let mut sha = Sha256::engine();
+                               sha.input(&onion_packet_pubkey.serialize()[..]);
+                               sha.input(onion_packet_ss.as_ref());
+                               Sha256::from_engine(sha).into_inner()
+                       };
+                       onion_packet_pubkey_priv = onion_packet_pubkey_priv.mul_tweak(&Scalar::from_be_bytes(onion_packet_pubkey_blinding_factor).unwrap())?;
+                       onion_packet_pubkey = PublicKey::from_secret_key(secp_ctx, &onion_packet_pubkey_priv);
+               };
+       }
+
+       for pk in unblinded_path {
+               build_keys_in_loop!(*pk, false, None);
+       }
+       if let Some(dest) = destination {
+               match dest {
+                       Destination::Node(pk) => {
+                               // Final unblinded hop: no need to roll the chains past it.
+                               build_keys!(pk, false, None);
+                       },
+                       Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => {
+                               for hop in blinded_hops {
+                                       build_keys_in_loop!(hop.blinded_node_id, true, Some(hop.encrypted_payload));
+                               }
+                       },
+               }
+       }
+       Ok(())
+}
index 37bf1ea5a6f9757d1da5a29f636aa0a853f2de8e..637a8c046ed2944bb3f32ceb348584881666c1ec 100644 (file)
@@ -16,13 +16,12 @@ use bitcoin::secp256k1;
 
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hashes::Hash;
-use bitcoin::blockdata::script::Builder;
 use bitcoin::blockdata::transaction::TxOut;
-use bitcoin::blockdata::opcodes;
 use bitcoin::hash_types::BlockHash;
 
 use chain;
 use chain::Access;
+use ln::chan_utils::make_funding_redeemscript;
 use ln::features::{ChannelFeatures, NodeFeatures};
 use ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
 use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
@@ -41,7 +40,7 @@ use core::{cmp, fmt};
 use sync::{RwLock, RwLockReadGuard};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use sync::Mutex;
-use core::ops::Deref;
+use core::ops::{Bound, Deref};
 use bitcoin::hashes::hex::ToHex;
 
 #[cfg(feature = "std")]
@@ -318,56 +317,43 @@ where C::Target: chain::Access, L::Target: Logger
                Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
        }
 
-       fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
-               let mut result = Vec::with_capacity(batch_amount as usize);
+       fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
                let channels = self.network_graph.channels.read().unwrap();
-               let mut iter = channels.range(starting_point..);
-               while result.len() < batch_amount as usize {
-                       if let Some((_, ref chan)) = iter.next() {
-                               if chan.announcement_message.is_some() {
-                                       let chan_announcement = chan.announcement_message.clone().unwrap();
-                                       let mut one_to_two_announcement: Option<msgs::ChannelUpdate> = None;
-                                       let mut two_to_one_announcement: Option<msgs::ChannelUpdate> = None;
-                                       if let Some(one_to_two) = chan.one_to_two.as_ref() {
-                                               one_to_two_announcement = one_to_two.last_update_message.clone();
-                                       }
-                                       if let Some(two_to_one) = chan.two_to_one.as_ref() {
-                                               two_to_one_announcement = two_to_one.last_update_message.clone();
-                                       }
-                                       result.push((chan_announcement, one_to_two_announcement, two_to_one_announcement));
-                               } else {
-                                       // TODO: We may end up sending un-announced channel_updates if we are sending
-                                       // initial sync data while receiving announce/updates for this channel.
+               for (_, ref chan) in channels.range(starting_point..) {
+                       if chan.announcement_message.is_some() {
+                               let chan_announcement = chan.announcement_message.clone().unwrap();
+                               let mut one_to_two_announcement: Option<msgs::ChannelUpdate> = None;
+                               let mut two_to_one_announcement: Option<msgs::ChannelUpdate> = None;
+                               if let Some(one_to_two) = chan.one_to_two.as_ref() {
+                                       one_to_two_announcement = one_to_two.last_update_message.clone();
                                }
+                               if let Some(two_to_one) = chan.two_to_one.as_ref() {
+                                       two_to_one_announcement = two_to_one.last_update_message.clone();
+                               }
+                               return Some((chan_announcement, one_to_two_announcement, two_to_one_announcement));
                        } else {
-                               return result;
+                               // TODO: We may end up sending un-announced channel_updates if we are sending
+                               // initial sync data while receiving announce/updates for this channel.
                        }
                }
-               result
+               None
        }
 
-       fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement> {
-               let mut result = Vec::with_capacity(batch_amount as usize);
+       fn get_next_node_announcement(&self, starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement> {
                let nodes = self.network_graph.nodes.read().unwrap();
-               let mut iter = if let Some(pubkey) = starting_point {
-                               let mut iter = nodes.range(NodeId::from_pubkey(pubkey)..);
-                               iter.next();
-                               iter
+               let iter = if let Some(pubkey) = starting_point {
+                               nodes.range((Bound::Excluded(NodeId::from_pubkey(pubkey)), Bound::Unbounded))
                        } else {
-                               nodes.range::<NodeId, _>(..)
+                               nodes.range(..)
                        };
-               while result.len() < batch_amount as usize {
-                       if let Some((_, ref node)) = iter.next() {
-                               if let Some(node_info) = node.announcement_info.as_ref() {
-                                       if node_info.announcement_message.is_some() {
-                                               result.push(node_info.announcement_message.clone().unwrap());
-                                       }
+               for (_, ref node) in iter {
+                       if let Some(node_info) = node.announcement_info.as_ref() {
+                               if let Some(msg) = node_info.announcement_message.clone() {
+                                       return Some(msg);
                                }
-                       } else {
-                               return result;
                        }
                }
-               result
+               None
        }
 
        /// Initiates a stateless sync of routing gossip information with a peer
@@ -929,7 +915,7 @@ impl<'a> fmt::Debug for DirectedChannelInfoWithUpdate<'a> {
 ///
 /// While this may be smaller than the actual channel capacity, amounts greater than
 /// [`Self::as_msat`] should not be routed through the channel.
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub enum EffectiveCapacity {
        /// The available liquidity in the channel known from being a channel counterparty, and thus a
        /// direct hop.
@@ -1447,6 +1433,39 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
                        return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
                }
 
+               {
+                       let channels = self.channels.read().unwrap();
+
+                       if let Some(chan) = channels.get(&msg.short_channel_id) {
+                               if chan.capacity_sats.is_some() {
+                                       // If we'd previously looked up the channel on-chain and checked the script
+                                       // against what appears on-chain, ignore the duplicate announcement.
+                                       //
+                                       // Because a reorg could replace one channel with another at the same SCID, if
+                                       // the channel appears to be different, we re-validate. This doesn't expose us
+                                       // to any more DoS risk than not, as a peer can always flood us with
+                                       // randomly-generated SCID values anyway.
+                                       //
+                                       // We use the Node IDs rather than the bitcoin_keys to check for "equivalence"
+                                       // as we didn't (necessarily) store the bitcoin keys, and we only really care
+                                       // if the peers on the channel changed anyway.
+                                       if NodeId::from_pubkey(&msg.node_id_1) == chan.node_one && NodeId::from_pubkey(&msg.node_id_2) == chan.node_two {
+                                               return Err(LightningError {
+                                                       err: "Already have chain-validated channel".to_owned(),
+                                                       action: ErrorAction::IgnoreDuplicateGossip
+                                               });
+                                       }
+                               } else if chain_access.is_none() {
+                                       // Similarly, if we can't check the chain right now anyway, ignore the
+                                       // duplicate announcement without bothering to take the channels write lock.
+                                       return Err(LightningError {
+                                               err: "Already have non-chain-validated channel".to_owned(),
+                                               action: ErrorAction::IgnoreDuplicateGossip
+                                       });
+                               }
+                       }
+               }
+
                let utxo_value = match &chain_access {
                        &None => {
                                // Tentatively accept, potentially exposing us to DoS attacks
@@ -1455,13 +1474,10 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
                        &Some(ref chain_access) => {
                                match chain_access.get_utxo(&msg.chain_hash, msg.short_channel_id) {
                                        Ok(TxOut { value, script_pubkey }) => {
-                                               let expected_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
-                                                                                   .push_slice(&msg.bitcoin_key_1.serialize())
-                                                                                   .push_slice(&msg.bitcoin_key_2.serialize())
-                                                                                   .push_opcode(opcodes::all::OP_PUSHNUM_2)
-                                                                                   .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
+                                               let expected_script =
+                                                       make_funding_redeemscript(&msg.bitcoin_key_1, &msg.bitcoin_key_2).to_v0_p2wsh();
                                                if script_pubkey != expected_script {
-                                                       return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", script_pubkey.to_hex(), expected_script.to_hex()), action: ErrorAction::IgnoreError});
+                                                       return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", expected_script.to_hex(), script_pubkey.to_hex()), action: ErrorAction::IgnoreError});
                                                }
                                                //TODO: Check if value is worth storing, use it to inform routing, and compare it
                                                //to the new HTLC max field in channel_update
@@ -1796,6 +1812,12 @@ impl ReadOnlyNetworkGraph<'_> {
                self.channels.get(&short_channel_id)
        }
 
+       #[cfg(c_bindings)] // Non-bindings users should use `channels`
+       /// Returns the list of channels in the graph
+       pub fn list_channels(&self) -> Vec<u64> {
+               self.channels.keys().map(|c| *c).collect()
+       }
+
        /// Returns all known nodes' public keys along with announced node info.
        ///
        /// (C-not exported) because we have no mapping for `BTreeMap`s
@@ -1808,6 +1830,12 @@ impl ReadOnlyNetworkGraph<'_> {
                self.nodes.get(node_id)
        }
 
+       #[cfg(c_bindings)] // Non-bindings users should use `nodes`
+       /// Returns the list of nodes in the graph
+       pub fn list_nodes(&self) -> Vec<NodeId> {
+               self.nodes.keys().map(|n| *n).collect()
+       }
+
        /// Get network addresses by node id.
        /// Returns None if the requested node is completely unknown,
        /// or if node announcement for the node was never received.
@@ -1824,6 +1852,7 @@ impl ReadOnlyNetworkGraph<'_> {
 #[cfg(test)]
 mod tests {
        use chain;
+       use ln::chan_utils::make_funding_redeemscript;
        use ln::PaymentHash;
        use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
        use routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo};
@@ -1841,9 +1870,8 @@ mod tests {
        use bitcoin::hashes::Hash;
        use bitcoin::network::constants::Network;
        use bitcoin::blockdata::constants::genesis_block;
-       use bitcoin::blockdata::script::{Builder, Script};
+       use bitcoin::blockdata::script::Script;
        use bitcoin::blockdata::transaction::TxOut;
-       use bitcoin::blockdata::opcodes;
 
        use hex;
 
@@ -1933,14 +1961,10 @@ mod tests {
        }
 
        fn get_channel_script(secp_ctx: &Secp256k1<secp256k1::All>) -> Script {
-               let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
-               let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
-               Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
-                             .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_1_btckey).serialize())
-                             .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey).serialize())
-                             .push_opcode(opcodes::all::OP_PUSHNUM_2)
-                             .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script()
-                             .to_v0_p2wsh()
+               let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap();
+               let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap();
+               make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey),
+                       &PublicKey::from_secret_key(secp_ctx, &node_2_btckey)).to_v0_p2wsh()
        }
 
        fn get_signed_channel_update<F: Fn(&mut UnsignedChannelUpdate)>(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> ChannelUpdate {
@@ -2055,7 +2079,7 @@ mod tests {
                // drop new one on the floor, since we can't see any changes.
                match gossip_sync.handle_channel_announcement(&valid_announcement) {
                        Ok(_) => panic!(),
-                       Err(e) => assert_eq!(e.err, "Already have knowledge of channel")
+                       Err(e) => assert_eq!(e.err, "Already have non-chain-validated channel")
                };
 
                // Test if an associated transaction were not on-chain (or not confirmed).
@@ -2089,32 +2113,13 @@ mod tests {
                        };
                }
 
-               // If we receive announcement for the same channel (but TX is not confirmed),
-               // drop new one on the floor, since we can't see any changes.
-               *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx);
-               match gossip_sync.handle_channel_announcement(&valid_announcement) {
-                       Ok(_) => panic!(),
-                       Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry")
-               };
-
-               // But if it is confirmed, replace the channel
+               // If we receive announcement for the same channel, once we've validated it against the
+               // chain, we simply ignore all new (duplicate) announcements.
                *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script });
-               let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| {
-                       unsigned_announcement.features = ChannelFeatures::empty();
-                       unsigned_announcement.short_channel_id += 2;
-               }, node_1_privkey, node_2_privkey, &secp_ctx);
                match gossip_sync.handle_channel_announcement(&valid_announcement) {
-                       Ok(res) => assert!(res),
-                       _ => panic!()
+                       Ok(_) => panic!(),
+                       Err(e) => assert_eq!(e.err, "Already have chain-validated channel")
                };
-               {
-                       match network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id) {
-                               Some(channel_entry) => {
-                                       assert_eq!(channel_entry.features, ChannelFeatures::empty());
-                               },
-                               _ => panic!()
-                       };
-               }
 
                // Don't relay valid channels with excess data
                let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| {
@@ -2400,8 +2405,8 @@ mod tests {
                let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
 
                // Channels were not announced yet.
-               let channels_with_announcements = gossip_sync.get_next_channel_announcements(0, 1);
-               assert_eq!(channels_with_announcements.len(), 0);
+               let channels_with_announcements = gossip_sync.get_next_channel_announcement(0);
+               assert!(channels_with_announcements.is_none());
 
                let short_channel_id;
                {
@@ -2415,17 +2420,15 @@ mod tests {
                }
 
                // Contains initial channel announcement now.
-               let channels_with_announcements = gossip_sync.get_next_channel_announcements(short_channel_id, 1);
-               assert_eq!(channels_with_announcements.len(), 1);
-               if let Some(channel_announcements) = channels_with_announcements.first() {
-                       let &(_, ref update_1, ref update_2) = channel_announcements;
+               let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id);
+               if let Some(channel_announcements) = channels_with_announcements {
+                       let (_, ref update_1, ref update_2) = channel_announcements;
                        assert_eq!(update_1, &None);
                        assert_eq!(update_2, &None);
                } else {
                        panic!();
                }
 
-
                {
                        // Valid channel update
                        let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| {
@@ -2438,10 +2441,9 @@ mod tests {
                }
 
                // Now contains an initial announcement and an update.
-               let channels_with_announcements = gossip_sync.get_next_channel_announcements(short_channel_id, 1);
-               assert_eq!(channels_with_announcements.len(), 1);
-               if let Some(channel_announcements) = channels_with_announcements.first() {
-                       let &(_, ref update_1, ref update_2) = channel_announcements;
+               let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id);
+               if let Some(channel_announcements) = channels_with_announcements {
+                       let (_, ref update_1, ref update_2) = channel_announcements;
                        assert_ne!(update_1, &None);
                        assert_eq!(update_2, &None);
                } else {
@@ -2461,10 +2463,9 @@ mod tests {
                }
 
                // Test that announcements with excess data won't be returned
-               let channels_with_announcements = gossip_sync.get_next_channel_announcements(short_channel_id, 1);
-               assert_eq!(channels_with_announcements.len(), 1);
-               if let Some(channel_announcements) = channels_with_announcements.first() {
-                       let &(_, ref update_1, ref update_2) = channel_announcements;
+               let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id);
+               if let Some(channel_announcements) = channels_with_announcements {
+                       let (_, ref update_1, ref update_2) = channel_announcements;
                        assert_eq!(update_1, &None);
                        assert_eq!(update_2, &None);
                } else {
@@ -2472,8 +2473,8 @@ mod tests {
                }
 
                // Further starting point have no channels after it
-               let channels_with_announcements = gossip_sync.get_next_channel_announcements(short_channel_id + 1000, 1);
-               assert_eq!(channels_with_announcements.len(), 0);
+               let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id + 1000);
+               assert!(channels_with_announcements.is_none());
        }
 
        #[test]
@@ -2485,8 +2486,8 @@ mod tests {
                let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
 
                // No nodes yet.
-               let next_announcements = gossip_sync.get_next_node_announcements(None, 10);
-               assert_eq!(next_announcements.len(), 0);
+               let next_announcements = gossip_sync.get_next_node_announcement(None);
+               assert!(next_announcements.is_none());
 
                {
                        // Announce a channel to add 2 nodes
@@ -2497,10 +2498,9 @@ mod tests {
                        };
                }
 
-
                // Nodes were never announced
-               let next_announcements = gossip_sync.get_next_node_announcements(None, 3);
-               assert_eq!(next_announcements.len(), 0);
+               let next_announcements = gossip_sync.get_next_node_announcement(None);
+               assert!(next_announcements.is_none());
 
                {
                        let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
@@ -2516,12 +2516,12 @@ mod tests {
                        };
                }
 
-               let next_announcements = gossip_sync.get_next_node_announcements(None, 3);
-               assert_eq!(next_announcements.len(), 2);
+               let next_announcements = gossip_sync.get_next_node_announcement(None);
+               assert!(next_announcements.is_some());
 
                // Skip the first node.
-               let next_announcements = gossip_sync.get_next_node_announcements(Some(&node_id_1), 2);
-               assert_eq!(next_announcements.len(), 1);
+               let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
+               assert!(next_announcements.is_some());
 
                {
                        // Later announcement which should not be relayed (excess data) prevent us from sharing a node
@@ -2535,8 +2535,8 @@ mod tests {
                        };
                }
 
-               let next_announcements = gossip_sync.get_next_node_announcements(Some(&node_id_1), 2);
-               assert_eq!(next_announcements.len(), 0);
+               let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
+               assert!(next_announcements.is_none());
        }
 
        #[test]
@@ -2991,7 +2991,7 @@ mod tests {
                let legacy_chan_update_info_with_none: Vec<u8> = hex::decode("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap();
                let read_chan_update_info_res: Result<ChannelUpdateInfo, ::ln::msgs::DecodeError> = ::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice());
                assert!(read_chan_update_info_res.is_err());
-                       
+
                // 2. Test encoding/decoding of ChannelInfo
                // Check we can encode/decode ChannelInfo without ChannelUpdateInfo fields present.
                let chan_info_none_updates = ChannelInfo {
index 6ae339eae2bbc01776b840d655c2ef68ce6e83aa..a2492accf59cd9fe33fdf9d8ed533d8ff234c5a4 100644 (file)
@@ -221,7 +221,7 @@ impl<'a, S: Writeable> Writeable for MutexGuard<'a, S> {
 }
 
 /// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct ChannelUsage {
        /// The amount to send through the channel, denominated in millisatoshis.
        pub amount_msat: u64,
index befe5d19f5f18f06454fd9140caf3fe026e9e8ec..1dbd91e65e0488168f59d439cdd4565f8d56803f 100644 (file)
@@ -286,10 +286,10 @@ mod fuzzy_chachapoly {
 
                pub(super) fn encrypt_in_place(&mut self, _input_output: &mut [u8]) {
                        assert!(self.finished == false);
-                       self.finished = true;
                }
 
                pub(super) fn finish_and_get_tag(&mut self, out_tag: &mut [u8]) {
+                       assert!(self.finished == false);
                        out_tag.copy_from_slice(&self.tag);
                        self.finished = true;
                }
index 2f65e958f8b60d0f0a0f72d3cd3b7ca1b009085a..54ea6178798d25a3e6103c39419b886632024ca9 100644 (file)
@@ -126,6 +126,30 @@ pub struct ChannelHandshakeConfig {
        ///
        /// [`KeysInterface::get_shutdown_scriptpubkey`]: crate::chain::keysinterface::KeysInterface::get_shutdown_scriptpubkey
        pub commit_upfront_shutdown_pubkey: bool,
+
+       /// The proportion of the channel value to configure as counterparty's channel reserve,
+       /// i.e., `their_channel_reserve_satoshis` for both outbound and inbound channels.
+       ///
+       /// `their_channel_reserve_satoshis` is the minimum balance that the other node has to maintain
+       /// on their side, at all times.
+       /// This ensures that if our counterparty broadcasts a revoked state, we can punish them by
+       /// claiming at least this value on chain.
+       ///
+       /// Channel reserve values greater than 30% could be considered highly unreasonable, since that
+       /// amount can never be used for payments.
+       /// Also, if our selected channel reserve for counterparty and counterparty's selected
+       /// channel reserve for us sum up to equal or greater than channel value, channel negotiations
+       /// will fail.
+       ///
+       /// Note: Versions of LDK earlier than v0.0.104 will fail to read channels with any channel reserve
+       /// other than the default value.
+       ///
+       /// Default value: 1% of channel value, i.e., configured as 10,000 millionths.
+       /// Minimum value: If the calculated proportional value is less than 1000 sats, it will be treated
+       ///                as 1000 sats instead, which is a safe implementation-specific lower bound.
+       /// Maximum value: 1,000,000, any values larger than 1,000,000 will be treated as 1,000,000 (or 100%)
+       ///                instead, although channel negotiations will fail in that case.
+       pub their_channel_reserve_proportional_millionths: u32
 }
 
 impl Default for ChannelHandshakeConfig {
@@ -138,6 +162,7 @@ impl Default for ChannelHandshakeConfig {
                        negotiate_scid_privacy: false,
                        announced_channel: false,
                        commit_upfront_shutdown_pubkey: true,
+                       their_channel_reserve_proportional_millionths: 10_000,
                }
        }
 }
index 42b551633258448fc1a3117a0997913b30edffc4..e86eae3c813a8af7a3684480171df913ea0883bd 100644 (file)
@@ -25,7 +25,7 @@ use routing::gossip::NetworkUpdate;
 use util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper};
 use routing::router::{RouteHop, RouteParameters};
 
-use bitcoin::Transaction;
+use bitcoin::{PackedLockTime, Transaction};
 use bitcoin::blockdata::script::Script;
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -907,7 +907,7 @@ impl MaybeReadable for Event {
                        11u8 => {
                                let f = || {
                                        let mut channel_id = [0; 32];
-                                       let mut transaction = Transaction{ version: 2, lock_time: 0, input: Vec::new(), output: Vec::new() };
+                                       let mut transaction = Transaction{ version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
                                        read_tlv_fields!(reader, {
                                                (0, channel_id, required),
                                                (2, transaction, required),
@@ -1195,6 +1195,12 @@ pub trait MessageSendEventsProvider {
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent>;
 }
 
+/// A trait indicating an object may generate onion messages to send
+pub trait OnionMessageProvider {
+       /// Gets the next pending onion message for the peer with the given node id.
+       fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage>;
+}
+
 /// A trait indicating an object may generate events.
 ///
 /// Events are processed by passing an [`EventHandler`] to [`process_pending_events`].
index eadfdbdfc67aaeea13477dc7b5315df4f4ab42cd..cd79a3f7bba8b3d500bf7036c1a202e4d6f2cd71 100644 (file)
@@ -91,7 +91,7 @@ impl<'a> core::fmt::Display for DebugTx<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
                if self.0.input.len() >= 1 && self.0.input.iter().any(|i| !i.witness.is_empty()) {
                        if self.0.input.len() == 1 && self.0.input[0].witness.last().unwrap().len() == 71 &&
-                                       (self.0.input[0].sequence >> 8*3) as u8 == 0x80 {
+                                       (self.0.input[0].sequence.0 >> 8*3) as u8 == 0x80 {
                                write!(f, "commitment tx ")?;
                        } else if self.0.input.len() == 1 && self.0.input[0].witness.last().unwrap().len() == 71 {
                                write!(f, "closing tx ")?;
@@ -160,6 +160,7 @@ macro_rules! log_internal {
 }
 
 /// Logs an entry at the given level.
+#[doc(hidden)]
 #[macro_export]
 macro_rules! log_given_level {
        ($logger: expr, $lvl:expr, $($arg:tt)+) => (
@@ -185,7 +186,7 @@ macro_rules! log_given_level {
        );
 }
 
-/// Log an error.
+/// Log at the `ERROR` level.
 #[macro_export]
 macro_rules! log_error {
        ($logger: expr, $($arg:tt)*) => (
@@ -193,25 +194,31 @@ macro_rules! log_error {
        )
 }
 
+/// Log at the `WARN` level.
+#[macro_export]
 macro_rules! log_warn {
        ($logger: expr, $($arg:tt)*) => (
                log_given_level!($logger, $crate::util::logger::Level::Warn, $($arg)*);
        )
 }
 
+/// Log at the `INFO` level.
+#[macro_export]
 macro_rules! log_info {
        ($logger: expr, $($arg:tt)*) => (
                log_given_level!($logger, $crate::util::logger::Level::Info, $($arg)*);
        )
 }
 
+/// Log at the `DEBUG` level.
+#[macro_export]
 macro_rules! log_debug {
        ($logger: expr, $($arg:tt)*) => (
                log_given_level!($logger, $crate::util::logger::Level::Debug, $($arg)*);
        )
 }
 
-/// Log a trace log.
+/// Log at the `TRACE` level.
 #[macro_export]
 macro_rules! log_trace {
        ($logger: expr, $($arg:tt)*) => (
@@ -219,7 +226,8 @@ macro_rules! log_trace {
        )
 }
 
-/// Log a gossip log.
+/// Log at the `GOSSIP` level.
+#[macro_export]
 macro_rules! log_gossip {
        ($logger: expr, $($arg:tt)*) => (
                log_given_level!($logger, $crate::util::logger::Level::Gossip, $($arg)*);
index 5b1a86a6a95f27a276f1dc221bd705f57fbf8ed4..852aa8f15892e5bbc3d14dd265ecf7f168044653 100644 (file)
@@ -244,6 +244,14 @@ pub(crate) trait LengthReadableArgs<P> where Self: Sized
        fn read<R: LengthRead>(reader: &mut R, params: P) -> Result<Self, DecodeError>;
 }
 
+/// A trait that various higher-level rust-lightning types implement allowing them to be read in
+/// from a Read, requiring the implementer to provide the total length of the read.
+pub(crate) trait LengthReadable where Self: Sized
+{
+       /// Reads a Self in from the given LengthRead
+       fn read<R: LengthRead>(reader: &mut R) -> Result<Self, DecodeError>;
+}
+
 /// A trait that various rust-lightning types implement allowing them to (maybe) be read in from a Read
 ///
 /// (C-not exported) as we only export serialization to/from byte arrays instead
@@ -392,7 +400,7 @@ impl Readable for BigSize {
 /// variable-length integer which is simply truncated by skipping high zero bytes. This type
 /// encapsulates such integers implementing Readable/Writeable for them.
 #[cfg_attr(test, derive(PartialEq, Debug))]
-pub(crate) struct HighZeroBytesDroppedVarInt<T>(pub T);
+pub(crate) struct HighZeroBytesDroppedBigSize<T>(pub T);
 
 macro_rules! impl_writeable_primitive {
        ($val_type:ty, $len: expr) => {
@@ -402,7 +410,7 @@ macro_rules! impl_writeable_primitive {
                                writer.write_all(&self.to_be_bytes())
                        }
                }
-               impl Writeable for HighZeroBytesDroppedVarInt<$val_type> {
+               impl Writeable for HighZeroBytesDroppedBigSize<$val_type> {
                        #[inline]
                        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                                // Skip any full leading 0 bytes when writing (in BE):
@@ -417,9 +425,9 @@ macro_rules! impl_writeable_primitive {
                                Ok(<$val_type>::from_be_bytes(buf))
                        }
                }
-               impl Readable for HighZeroBytesDroppedVarInt<$val_type> {
+               impl Readable for HighZeroBytesDroppedBigSize<$val_type> {
                        #[inline]
-                       fn read<R: Read>(reader: &mut R) -> Result<HighZeroBytesDroppedVarInt<$val_type>, DecodeError> {
+                       fn read<R: Read>(reader: &mut R) -> Result<HighZeroBytesDroppedBigSize<$val_type>, DecodeError> {
                                // We need to accept short reads (read_len == 0) as "EOF" and handle them as simply
                                // the high bytes being dropped. To do so, we start reading into the middle of buf
                                // and then convert the appropriate number of bytes with extra high bytes out of
@@ -435,7 +443,7 @@ macro_rules! impl_writeable_primitive {
                                        let first_byte = $len - ($len - total_read_len);
                                        let mut bytes = [0; $len];
                                        bytes.copy_from_slice(&buf[first_byte..first_byte + $len]);
-                                       Ok(HighZeroBytesDroppedVarInt(<$val_type>::from_be_bytes(bytes)))
+                                       Ok(HighZeroBytesDroppedBigSize(<$val_type>::from_be_bytes(bytes)))
                                } else {
                                        // If the encoding had extra zero bytes, return a failure even though we know
                                        // what they meant (as the TLV test vectors require this)
@@ -852,7 +860,7 @@ macro_rules! impl_consensus_ser {
        ($bitcoin_type: ty) => {
                impl Writeable for $bitcoin_type {
                        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-                               match self.consensus_encode(WriterWriteAdaptor(writer)) {
+                               match self.consensus_encode(&mut WriterWriteAdaptor(writer)) {
                                        Ok(_) => Ok(()),
                                        Err(e) => Err(e),
                                }
index 351c2a1f5e25e4d21ad0c999e2f064bf366a6f35..94990fcb8a17c8056dfd01d00b18db78d58e466f 100644 (file)
@@ -563,7 +563,7 @@ mod tests {
        use io::{self, Cursor};
        use prelude::*;
        use ln::msgs::DecodeError;
-       use util::ser::{Writeable, HighZeroBytesDroppedVarInt, VecWriter};
+       use util::ser::{Writeable, HighZeroBytesDroppedBigSize, VecWriter};
        use bitcoin::secp256k1::PublicKey;
 
        // The BOLT TLV test cases don't include any tests which use our "required-value" logic since
@@ -632,9 +632,9 @@ mod tests {
        }
 
        // BOLT TLV test cases
-       fn tlv_reader_n1(s: &[u8]) -> Result<(Option<HighZeroBytesDroppedVarInt<u64>>, Option<u64>, Option<(PublicKey, u64, u64)>, Option<u16>), DecodeError> {
+       fn tlv_reader_n1(s: &[u8]) -> Result<(Option<HighZeroBytesDroppedBigSize<u64>>, Option<u64>, Option<(PublicKey, u64, u64)>, Option<u16>), DecodeError> {
                let mut s = Cursor::new(s);
-               let mut tlv1: Option<HighZeroBytesDroppedVarInt<u64>> = None;
+               let mut tlv1: Option<HighZeroBytesDroppedBigSize<u64>> = None;
                let mut tlv2: Option<u64> = None;
                let mut tlv3: Option<(PublicKey, u64, u64)> = None;
                let mut tlv4: Option<u16> = None;
@@ -765,11 +765,11 @@ mod tests {
                assert_eq!(stream.0, ::hex::decode("06fd00ff02abcd").unwrap());
 
                stream.0.clear();
-               encode_varint_length_prefixed_tlv!(&mut stream, {(0, 1u64, required), (42, None::<u64>, option), (0xff, HighZeroBytesDroppedVarInt(0u64), required)});
+               encode_varint_length_prefixed_tlv!(&mut stream, {(0, 1u64, required), (42, None::<u64>, option), (0xff, HighZeroBytesDroppedBigSize(0u64), required)});
                assert_eq!(stream.0, ::hex::decode("0e00080000000000000001fd00ff00").unwrap());
 
                stream.0.clear();
-               encode_varint_length_prefixed_tlv!(&mut stream, {(0, Some(1u64), option), (0xff, HighZeroBytesDroppedVarInt(0u64), required)});
+               encode_varint_length_prefixed_tlv!(&mut stream, {(0, Some(1u64), option), (0xff, HighZeroBytesDroppedBigSize(0u64), required)});
                assert_eq!(stream.0, ::hex::decode("0e00080000000000000001fd00ff00").unwrap());
 
                Ok(())
index 9306fb254f42f8c071d7b6880b22b00bb072fd2d..b4d26904706240de647eec14ef97ef5b175ff8cf 100644 (file)
@@ -34,7 +34,8 @@ use bitcoin::blockdata::block::Block;
 use bitcoin::network::constants::Network;
 use bitcoin::hash_types::{BlockHash, Txid};
 
-use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, ecdsa::Signature};
+use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, ecdsa::Signature, Scalar};
+use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 
 use regex;
@@ -44,12 +45,13 @@ use prelude::*;
 use core::time::Duration;
 use sync::{Mutex, Arc};
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
-use core::{cmp, mem};
+use core::mem;
 use bitcoin::bech32::u5;
 use chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial};
 
 #[cfg(feature = "std")]
 use std::time::{SystemTime, UNIX_EPOCH};
+use bitcoin::Sequence;
 
 pub struct TestVecWriter(pub Vec<u8>);
 impl Writer for TestVecWriter {
@@ -73,6 +75,7 @@ impl keysinterface::KeysInterface for OnlyReadsKeysInterface {
        type Signer = EnforcingSigner;
 
        fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> { unreachable!(); }
+       fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&Scalar>) -> Result<SharedSecret, ()> { unreachable!(); }
        fn get_inbound_payment_key_material(&self) -> KeyMaterial { unreachable!(); }
        fn get_destination_script(&self) -> Script { unreachable!(); }
        fn get_shutdown_scriptpubkey(&self) -> ShutdownScript { unreachable!(); }
@@ -239,10 +242,11 @@ impl TestBroadcaster {
 
 impl chaininterface::BroadcasterInterface for TestBroadcaster {
        fn broadcast_transaction(&self, tx: &Transaction) {
-               assert!(tx.lock_time < 1_500_000_000);
-               if tx.lock_time > self.blocks.lock().unwrap().len() as u32 + 1 && tx.lock_time < 500_000_000 {
+               let lock_time = tx.lock_time.0;
+               assert!(lock_time < 1_500_000_000);
+               if lock_time > self.blocks.lock().unwrap().len() as u32 + 1 && lock_time < 500_000_000 {
                        for inp in tx.input.iter() {
-                               if inp.sequence != 0xffffffff {
+                               if inp.sequence != Sequence::MAX {
                                        panic!("We should never broadcast a transaction before its locktime ({})!", tx.lock_time);
                                }
                        }
@@ -443,23 +447,16 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
                self.chan_upds_recvd.fetch_add(1, Ordering::AcqRel);
                Err(msgs::LightningError { err: "".to_owned(), action: msgs::ErrorAction::IgnoreError })
        }
-       fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
-               let mut chan_anns = Vec::new();
-               const TOTAL_UPDS: u64 = 50;
-               let end: u64 = cmp::min(starting_point + batch_amount as u64, TOTAL_UPDS);
-               for i in starting_point..end {
-                       let chan_upd_1 = get_dummy_channel_update(i);
-                       let chan_upd_2 = get_dummy_channel_update(i);
-                       let chan_ann = get_dummy_channel_announcement(i);
+       fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
+               let chan_upd_1 = get_dummy_channel_update(starting_point);
+               let chan_upd_2 = get_dummy_channel_update(starting_point);
+               let chan_ann = get_dummy_channel_announcement(starting_point);
 
-                       chan_anns.push((chan_ann, Some(chan_upd_1), Some(chan_upd_2)));
-               }
-
-               chan_anns
+               Some((chan_ann, Some(chan_upd_1), Some(chan_upd_2)))
        }
 
-       fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
-               Vec::new()
+       fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<msgs::NodeAnnouncement> {
+               None
        }
 
        fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
@@ -598,6 +595,9 @@ impl keysinterface::KeysInterface for TestKeysInterface {
        fn get_node_secret(&self, recipient: Recipient) -> Result<SecretKey, ()> {
                self.backing.get_node_secret(recipient)
        }
+       fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+               self.backing.ecdh(recipient, other_key, tweak)
+       }
        fn get_inbound_payment_key_material(&self) -> keysinterface::KeyMaterial {
                self.backing.get_inbound_payment_key_material()
        }
@@ -723,7 +723,6 @@ pub struct TestChainSource {
        pub utxo_ret: Mutex<Result<TxOut, chain::AccessError>>,
        pub watched_txn: Mutex<HashSet<(Txid, Script)>>,
        pub watched_outputs: Mutex<HashSet<(OutPoint, Script)>>,
-       expectations: Mutex<Option<VecDeque<OnRegisterOutput>>>,
 }
 
 impl TestChainSource {
@@ -734,17 +733,8 @@ impl TestChainSource {
                        utxo_ret: Mutex::new(Ok(TxOut { value: u64::max_value(), script_pubkey })),
                        watched_txn: Mutex::new(HashSet::new()),
                        watched_outputs: Mutex::new(HashSet::new()),
-                       expectations: Mutex::new(None),
                }
        }
-
-       /// Sets an expectation that [`chain::Filter::register_output`] is called.
-       pub fn expect(&self, expectation: OnRegisterOutput) -> &Self {
-               self.expectations.lock().unwrap()
-                       .get_or_insert_with(|| VecDeque::new())
-                       .push_back(expectation);
-               self
-       }
 }
 
 impl chain::Access for TestChainSource {
@@ -762,24 +752,8 @@ impl chain::Filter for TestChainSource {
                self.watched_txn.lock().unwrap().insert((*txid, script_pubkey.clone()));
        }
 
-       fn register_output(&self, output: WatchedOutput) -> Option<(usize, Transaction)> {
-               let dependent_tx = match &mut *self.expectations.lock().unwrap() {
-                       None => None,
-                       Some(expectations) => match expectations.pop_front() {
-                               None => {
-                                       panic!("Unexpected register_output: {:?}",
-                                               (output.outpoint, output.script_pubkey));
-                               },
-                               Some(expectation) => {
-                                       assert_eq!(output.outpoint, expectation.outpoint());
-                                       assert_eq!(&output.script_pubkey, expectation.script_pubkey());
-                                       expectation.returns
-                               },
-                       },
-               };
-
+       fn register_output(&self, output: WatchedOutput) {
                self.watched_outputs.lock().unwrap().insert((output.outpoint, output.script_pubkey));
-               dependent_tx
        }
 }
 
@@ -788,47 +762,6 @@ impl Drop for TestChainSource {
                if panicking() {
                        return;
                }
-
-               if let Some(expectations) = &*self.expectations.lock().unwrap() {
-                       if !expectations.is_empty() {
-                               panic!("Unsatisfied expectations: {:?}", expectations);
-                       }
-               }
-       }
-}
-
-/// An expectation that [`chain::Filter::register_output`] was called with a transaction output and
-/// returns an optional dependent transaction that spends the output in the same block.
-pub struct OnRegisterOutput {
-       /// The transaction output to register.
-       pub with: TxOutReference,
-
-       /// A dependent transaction spending the output along with its position in the block.
-       pub returns: Option<(usize, Transaction)>,
-}
-
-/// A transaction output as identified by an index into a transaction's output list.
-pub struct TxOutReference(pub Transaction, pub usize);
-
-impl OnRegisterOutput {
-       fn outpoint(&self) -> OutPoint {
-               let txid = self.with.0.txid();
-               let index = self.with.1 as u16;
-               OutPoint { txid, index }
-       }
-
-       fn script_pubkey(&self) -> &Script {
-               let index = self.with.1;
-               &self.with.0.output[index].script_pubkey
-       }
-}
-
-impl core::fmt::Debug for OnRegisterOutput {
-       fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-               f.debug_struct("OnRegisterOutput")
-                       .field("outpoint", &self.outpoint())
-                       .field("script_pubkey", self.script_pubkey())
-                       .finish()
        }
 }
 
index 12768543783cd148b7874a0009aa6b44650cd71a..028a08345edebaa867bead864cfda7198b218924 100644 (file)
@@ -56,7 +56,7 @@ pub(crate) fn maybe_add_change_output(tx: &mut Transaction, input_value: u64, wi
        weight_with_change += (VarInt(tx.output.len() as u64 + 1).len() - VarInt(tx.output.len() as u64).len()) as i64 * 4;
        // When calculating weight, add two for the flag bytes
        let change_value: i64 = (input_value - output_value) as i64 - weight_with_change * feerate_sat_per_1000_weight as i64 / 1000;
-       if change_value >= dust_value.as_sat() as i64 {
+       if change_value >= dust_value.to_sat() as i64 {
                change_output.value = change_value as u64;
                tx.output.push(change_output);
                Ok(weight_with_change as usize)
@@ -75,9 +75,8 @@ mod tests {
        use bitcoin::blockdata::script::{Script, Builder};
        use bitcoin::hash_types::{PubkeyHash, Txid};
 
-       use bitcoin::hashes::sha256d::Hash as Sha256dHash;
        use bitcoin::hashes::Hash;
-       use bitcoin::Witness;
+       use bitcoin::{PackedLockTime, Sequence, Witness};
 
        use hex::decode;
 
@@ -215,7 +214,7 @@ mod tests {
        #[test]
        fn test_tx_value_overrun() {
                // If we have a bogus input amount or outputs valued more than inputs, we should fail
-               let mut tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+               let mut tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
                        script_pubkey: Script::new(), value: 1000
                }] };
                assert!(maybe_add_change_output(&mut tx, 21_000_000_0000_0001, 0, 253, Script::new()).is_err());
@@ -226,10 +225,10 @@ mod tests {
        #[test]
        fn test_tx_change_edge() {
                // Check that we never add dust outputs
-               let mut tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: Vec::new() };
+               let mut tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
                let orig_wtxid = tx.wtxid();
                let output_spk = Script::new_p2pkh(&PubkeyHash::hash(&[0; 0]));
-               assert_eq!(output_spk.dust_value().as_sat(), 546);
+               assert_eq!(output_spk.dust_value().to_sat(), 546);
                // 9 sats isn't enough to pay fee on a dummy transaction...
                assert_eq!(tx.weight() as u64, 40); // ie 10 vbytes
                assert!(maybe_add_change_output(&mut tx, 9, 0, 250, output_spk.clone()).is_err());
@@ -260,8 +259,8 @@ mod tests {
        #[test]
        fn test_tx_extra_outputs() {
                // Check that we correctly handle existing outputs
-               let mut tx = Transaction { version: 2, lock_time: 0, input: vec![TxIn {
-                       previous_output: OutPoint::new(Txid::from_hash(Sha256dHash::default()), 0), script_sig: Script::new(), witness: Witness::new(), sequence: 0,
+               let mut tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: vec![TxIn {
+                       previous_output: OutPoint::new(Txid::all_zeros(), 0), script_sig: Script::new(), witness: Witness::new(), sequence: Sequence::ZERO,
                }], output: vec![TxOut {
                        script_pubkey: Builder::new().push_int(1).into_script(), value: 1000
                }] };
@@ -269,7 +268,7 @@ mod tests {
                let orig_weight = tx.weight();
                assert_eq!(orig_weight / 4, 61);
 
-               assert_eq!(Builder::new().push_int(2).into_script().dust_value().as_sat(), 474);
+               assert_eq!(Builder::new().push_int(2).into_script().dust_value().to_sat(), 474);
 
                // Input value of the output value + fee - 1 should fail:
                assert!(maybe_add_change_output(&mut tx, 1000 + 61 + 100 - 1, 400, 250, Builder::new().push_int(2).into_script()).is_err());