Merge pull request #1062 from galderz/t_payment_hash_999
authorMatt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Fri, 8 Oct 2021 20:49:19 +0000 (20:49 +0000)
committerGitHub <noreply@github.com>
Fri, 8 Oct 2021 20:49:19 +0000 (20:49 +0000)
34 files changed:
CHANGELOG.md
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
lightning-background-processor/Cargo.toml
lightning-background-processor/src/lib.rs
lightning-block-sync/Cargo.toml
lightning-block-sync/src/convert.rs
lightning-block-sync/src/http.rs
lightning-invoice/Cargo.toml
lightning-net-tokio/Cargo.toml
lightning-persister/Cargo.toml
lightning-persister/src/lib.rs
lightning/Cargo.toml
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/mod.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/mod.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/payment_tests.rs [new file with mode: 0644]
lightning/src/ln/peer_handler.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/script.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/ln/wire.rs
lightning/src/routing/network_graph.rs
lightning/src/routing/router.rs
lightning/src/util/events.rs
lightning/src/util/ser.rs

index a23f9d76a957b61fe5be5cb28f23f542b317286e..24a8c96ffaa4e94d88139a31549be0d4c079e0ec 100644 (file)
@@ -1,3 +1,105 @@
+# 0.0.101 - 2021-09-23
+
+## API Updates
+ * Custom message types are now supported directly in the `PeerManager`,
+   allowing you to send and receive messages of any type that is not natively
+   understood by LDK. This requires a new type bound on `PeerManager`, a
+   `CustomMessageHandler`. `IgnoringMessageHandler` provides a simple default
+   for this new bound for ignoring unknown messages (#1031, #1074).
+ * Route graph updates as a result of failed payments are no longer provided as
+   `MessageSendEvent::PaymentFailureNetworkUpdate` but instead included in a
+   new field in the `Event::PaymentFailed` events. Generally, this means route
+   graph updates are no longer handled as a part of the `PeerManager` but
+   instead through the new `EventHandler` implementation for
+   `NetGraphMsgHandler`. To make this easy, a new parameter to
+   `lightning-background-processor::BackgroundProcessor::start` is added, which
+   contains an `Option`al `NetGraphmsgHandler`. If provided as `Some`, relevant
+   events will be processed by the `NetGraphMsgHandler` prior to normal event
+   handling (#1043).
+ * `NetworkGraph` is now, itself, thread-safe. Accordingly, most functions now
+   take `&self` instead of `&mut self` and the graph data can be accessed
+   through `NetworkGraph.read_only` (#1043).
+ * The balances available on-chain to claim after a channel has been closed are
+   now exposed via `ChannelMonitor::get_claimable_balances` and
+   `ChainMonitor::get_claimable_balances`. The second can be used to get
+   information about all closed channels which still have on-chain balances
+   associated with them. See enum variants of `ln::channelmonitor::Balance` and
+   method documentation for the above methods for more information on the types
+   of balances exposed (#1034).
+ * When one HTLC of a multi-path payment fails, the new field `all_paths_failed`
+   in `Event::PaymentFailed` is set to `false`. This implies that the payment
+   has not failed, but only one part. Payment resolution is only indicated by an
+   `Event::PaymentSent` event or an `Event::PaymentFailed` with
+   `all_paths_failed` set to `true`, which is also set for the last remaining
+   part of a multi-path payment (#1053).
+ * To better capture the context described above, `Event::PaymentFailed` has
+   been renamed to `Event::PaymentPathFailed` (#1084).
+ * A new event, `ChannelClosed`, is provided by `ChannelManager` when a channel
+   is closed, including a reason and error message (if relevant, #997).
+ * `lightning-invoice` now considers invoices with sub-millisatoshi precision
+   to be invalid, and requires millisatoshi values during construction (thus
+   you must call `amount_milli_satoshis` instead of `amount_pico_btc`, #1057).
+ * The `BaseSign` interface now includes two new hooks which provide additional
+   information about commitment transaction signatures and revocation secrets
+   provided by our counterparty, allowing additional verification (#1039).
+ * The `BaseSign` interface now includes additional information for cooperative
+   close transactions, making it easier for a signer to verify requests (#1064).
+ * `Route` has two additional helper methods to get fees and amounts (#1063).
+ * `Txid` and `Transaction` objects can now be deserialized from responses when
+   using the HTTP client in the `lightning-block-sync` crate (#1037, #1061).
+
+## Bug Fixes
+ * Fix a panic when reading a lightning invoice with a non-recoverable
+   signature. Further, restrict lightning invoice parsing to require payment
+   secrets and better handle a few edge cases as required by BOLT 11 (#1057).
+ * Fix a panic when receiving multiple messages (such as HTLC fulfill messages)
+   after a call to `chain::Watch::update_channel` returned
+   `Err(ChannelMonitorUpdateErr::TemporaryFailure)` with no
+   `ChannelManager::channel_monitor_updated` call in between (#1066).
+ * For multi-path payments, `Event::PaymentSent` is no longer generated
+   multiple times, once for each independent part (#1053).
+ * Multi-hop route hints in invoices are now considered in the default router
+   provided via `get_route` (#1040).
+ * The time peers have to respond to pings has been increased when building
+   with debug assertions enabled. This avoids peer disconnections on slow hosts
+   when running in debug mode (#1051).
+ * The timeout for the first byte of a response for requests from the
+   `lightning-block-sync` crate has been increased to 300 seconds to better
+   handle the long hangs in Bitcoin Core when it syncs to disk (#1090).
+
+## Serialization Compatibility
+ * Due to a bug in 0.0.100, `Event`s written by 0.0.101 which are of a type not
+   understood by 0.0.100 may lead to `Err(DecodeError::InvalidValue)` or corrupt
+   deserialized objects in 0.100. Such `Event`s will lead to an
+   `Err(DecodeError::InvalidValue)` in versions prior to 0.0.100. The only such
+   new event written by 0.0.101 is `Event::ChannelClosed` (#1087).
+ * Payments that were initiated in versions prior to 0.0.101 may still
+   generate duplicate `PaymentSent` `Event`s or may have spurious values for
+   `Event::PaymentPathFailed::all_paths_failed` (#1053).
+ * The return values of `ChannelMonitor::get_claimable_balances` (and, thus,
+   `ChainMonitor::get_claimable_balances`) may be spurious for channels where
+   the spend of the funding transaction appeared on chain while running a
+   version prior to 0.0.101. `Balance` information should only be relied upon
+   for channels that were closed while running 0.0.101+ (#1034).
+ * Payments failed while running versions prior to 0.0.101 will never have a
+   `Some` for the `network_update` field (#1043).
+
+In total, this release features 67 files changed, 4980 insertions, 1888
+deletions in 89 commits from 12 authors, in alphabetical order:
+ * Antoine Riard
+ * Devrandom
+ * Galder ZamarrenĖƒo
+ * Giles Cope
+ * Jeffrey Czyz
+ * Joseph Goulden
+ * Matt Corallo
+ * Sergi Delgado Segura
+ * Tibo-lg
+ * Valentine Wallace
+ * abhik-99
+ * vss96
+
+
 # 0.0.100 - 2021-08-17
 
 ## API Updates
index f85ac9b9def2839542eefa00b0777a2838612302..4177b50275931722565fc1468aa7e5b08f29d76d 100644 (file)
@@ -827,7 +827,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                        }
                                                },
                                                events::Event::PaymentSent { .. } => {},
-                                               events::Event::PaymentFailed { .. } => {},
+                                               events::Event::PaymentPathFailed { .. } => {},
                                                events::Event::PaymentForwarded { .. } if $node == 1 => {},
                                                events::Event::PendingHTLCsForwardable { .. } => {
                                                        nodes[$node].process_pending_htlc_forwards();
index d82ff88d9c7d20acba55db6e8096a48b38015555..799c2eb8cb94b2f3a1bdbf2abcfc9c43809f4fd6 100644 (file)
@@ -680,7 +680,7 @@ mod tests {
                // 030012 - inbound read from peer id 0 of len 18
                // 0141 03000000000000000000000000000000 - message header indicating message length 321
                // 0300fe - inbound read from peer id 0 of len 254
-               // 0020 7500000000000000000000000000000000000000000000000000000000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 000000000000014a ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message
+               // 0020 7500000000000000000000000000000000000000000000000000000000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000162 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message
                // 030053 - inbound read from peer id 0 of len 83
                // 030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 03000000000000000000000000000000 - rest of open_channel and mac
                //
@@ -731,7 +731,7 @@ mod tests {
                // 030112 - inbound read from peer id 1 of len 18
                // 0110 01000000000000000000000000000000 - message header indicating message length 272
                // 0301ff - inbound read from peer id 1 of len 255
-               // 0021 0000000000000000000000000000000000000000000000000000000000000e05 000000000000014a 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 02660000000000000000000000000000 - beginning of accept_channel
+               // 0021 0000000000000000000000000000000000000000000000000000000000000e05 0000000000000162 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 02660000000000000000000000000000 - beginning of accept_channel
                // 030121 - inbound read from peer id 1 of len 33
                // 0000000000000000000000000000000000 01000000000000000000000000000000 - rest of accept_channel and mac
                //
@@ -927,7 +927,7 @@ mod tests {
                // - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10)
 
                let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) });
-               super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012000a0300000000000000000000000000000003001a00100002200000022000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c3500000000000000000000000000000014affffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112000a0100000000000000000000000000000003011a0010000220000002200001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000014a00000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500026600000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000
000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000007030112006401000000000
00000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000
0000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000927c0000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4b000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000000000000000000000000000000000000000
000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
+               super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012000a0300000000000000000000000000000003001a00100002200000022000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112000a0100000000000000000000000000000003011a0010000220000002200001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500026600000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000
000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000007030112006401000000000
00000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000
0000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000927c0000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4b000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000000000000000000000000000000000000000
000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
 
                let log_entries = logger.lines.lock().unwrap();
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1
index 98d9ade945e5222bca928daa6739c060d60ad1f8..178ffba80403acbffc35e4e7cb127e039fbe5a43 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-background-processor"
-version = "0.0.100"
+version = "0.0.101"
 authors = ["Valentine Wallace <vwallace@protonmail.com>"]
 license = "MIT OR Apache-2.0"
 repository = "http://github.com/rust-bitcoin/rust-lightning"
@@ -11,9 +11,9 @@ edition = "2018"
 
 [dependencies]
 bitcoin = "0.27"
-lightning = { version = "0.0.100", path = "../lightning", features = ["allow_wallclock_use"] }
-lightning-persister = { version = "0.0.100", path = "../lightning-persister" }
+lightning = { version = "0.0.101", path = "../lightning", features = ["allow_wallclock_use"] }
+lightning-persister = { version = "0.0.101", path = "../lightning-persister" }
 
 [dev-dependencies]
-lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.101", path = "../lightning", features = ["_test_utils"] }
 
index 4e6fb6f02dfdf7a2f43f21d9f176f5f528f270d3..c7d55ae61b7ecdc9ebb586654efd76a0dd650397 100644 (file)
@@ -614,6 +614,7 @@ mod tests {
                        .expect("SpendableOutputs not handled within deadline");
                match event {
                        Event::SpendableOutputs { .. } => {},
+                       Event::ChannelClosed { .. } => {},
                        _ => panic!("Unexpected event: {:?}", event),
                }
 
index 0902cc3206905c5ed496a85eb4745a25d6db0264..5e0778cf7c7b87f58c0de61462ded34342e355b6 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-block-sync"
-version = "0.0.100"
+version = "0.0.101"
 authors = ["Jeffrey Czyz", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "http://github.com/rust-bitcoin/rust-lightning"
@@ -15,7 +15,7 @@ rpc-client = [ "serde", "serde_json", "chunked_transfer" ]
 
 [dependencies]
 bitcoin = "0.27"
-lightning = { version = "0.0.100", path = "../lightning" }
+lightning = { version = "0.0.101", path = "../lightning" }
 tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
 serde = { version = "1.0", features = ["derive"], optional = true }
 serde_json = { version = "1.0", optional = true }
index e8e1427bdb654ffa432d069faf37457fc5a74cea..358076f4c25a883f5e571b421f9d3ff1ebe20f27 100644 (file)
@@ -1,11 +1,12 @@
-use crate::{BlockHeaderData, BlockSourceError};
 use crate::http::{BinaryResponse, JsonResponse};
 use crate::utils::hex_to_uint256;
+use crate::{BlockHeaderData, BlockSourceError};
 
 use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::consensus::encode;
 use bitcoin::hash_types::{BlockHash, TxMerkleNode, Txid};
-use bitcoin::hashes::hex::{ToHex, FromHex};
+use bitcoin::hashes::hex::{FromHex, ToHex};
+use bitcoin::Transaction;
 
 use serde::Deserialize;
 
@@ -104,7 +105,6 @@ impl TryFrom<GetHeaderResponse> for BlockHeaderData {
        }
 }
 
-
 /// Converts a JSON value into a block. Assumes the block is hex-encoded in a JSON string.
 impl TryInto<Block> for JsonResponse {
        type Error = std::io::Error;
@@ -181,13 +181,77 @@ impl TryInto<Txid> for JsonResponse {
        }
 }
 
+/// Converts a JSON value into a transaction. WATCH OUT! this cannot be used for zero-input transactions
+/// (e.g. createrawtransaction). See https://github.com/rust-bitcoin/rust-bitcoincore-rpc/issues/197
+impl TryInto<Transaction> for JsonResponse {
+       type Error = std::io::Error;
+       fn try_into(self) -> std::io::Result<Transaction> {
+               let hex_tx = if self.0.is_object() {
+                       // result is json encoded
+                       match &self.0["hex"] {
+                               // result has hex field
+                               serde_json::Value::String(hex_data) => match self.0["complete"] {
+                                       // result may or may not be signed (e.g. signrawtransactionwithwallet)
+                                       serde_json::Value::Bool(x) => {
+                                               if x == false {
+                                                       let reason = match &self.0["errors"][0]["error"] {
+                                                               serde_json::Value::String(x) => x.as_str(),
+                                                               _ => "Unknown error",
+                                                       };
+
+                                                       return Err(std::io::Error::new(
+                                                               std::io::ErrorKind::InvalidData,
+                                                               format!("transaction couldn't be signed. {}", reason),
+                                                       ));
+                                               } else {
+                                                       hex_data
+                                               }
+                                       }
+                                       // result is a complete transaction (e.g. getrawtranaction verbose)
+                                       _ => hex_data,
+                               },
+                               _ => return Err(std::io::Error::new(
+                                                       std::io::ErrorKind::InvalidData,
+                                                       "expected JSON string",
+                                       )),
+                       }
+               } else {
+                       // result is plain text (e.g. getrawtransaction no verbose)
+                       match self.0.as_str() {
+                               Some(hex_tx) => hex_tx,
+                               None => {
+                                       return Err(std::io::Error::new(
+                                               std::io::ErrorKind::InvalidData,
+                                               "expected JSON string",
+                                       ))
+                               }
+                       }
+               };
+
+               match Vec::<u8>::from_hex(hex_tx) {
+                       Err(_) => Err(std::io::Error::new(
+                               std::io::ErrorKind::InvalidData,
+                               "invalid hex data",
+                       )),
+                       Ok(tx_data) => match encode::deserialize(&tx_data) {
+                               Err(_) => Err(std::io::Error::new(
+                                       std::io::ErrorKind::InvalidData,
+                                       "invalid transaction",
+                               )),
+                               Ok(tx) => Ok(tx),
+                       },
+               }
+       }
+}
+
 #[cfg(test)]
 pub(crate) mod tests {
        use super::*;
        use bitcoin::blockdata::constants::genesis_block;
-       use bitcoin::consensus::encode;
        use bitcoin::hashes::Hash;
        use bitcoin::network::constants::Network;
+       use serde_json::value::Number;
+       use serde_json::Value;
 
        /// Converts from `BlockHeaderData` into a `GetHeaderResponse` JSON value.
        impl From<BlockHeaderData> for serde_json::Value {
@@ -541,4 +605,101 @@ pub(crate) mod tests {
                        Ok(txid) => assert_eq!(txid, target_txid),
                }
        }
+
+       // TryInto<Transaction> can be used in two ways, first with plain hex response where data is
+       // the hex encoded transaction (e.g. as a result of getrawtransaction) or as a JSON object
+       // where the hex encoded transaction can be found in the hex field of the object (if present)
+       // (e.g. as a result of signrawtransactionwithwallet).
+
+       // plain hex transaction
+
+       #[test]
+       fn into_tx_from_json_response_with_invalid_hex_data() {
+               let response = JsonResponse(serde_json::json!("foobar"));
+               match TryInto::<Transaction>::try_into(response) {
+                       Err(e) => {
+                               assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
+                               assert_eq!(e.get_ref().unwrap().to_string(), "invalid hex data");
+                       }
+                       Ok(_) => panic!("Expected error"),
+               }
+       }
+
+       #[test]
+       fn into_tx_from_json_response_with_invalid_data_type() {
+               let response = JsonResponse(Value::Number(Number::from_f64(1.0).unwrap()));
+               match TryInto::<Transaction>::try_into(response) {
+                       Err(e) => {
+                               assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
+                               assert_eq!(e.get_ref().unwrap().to_string(), "expected JSON string");
+                       }
+                       Ok(_) => panic!("Expected error"),
+               }
+       }
+
+       #[test]
+       fn into_tx_from_json_response_with_invalid_tx_data() {
+               let response = JsonResponse(serde_json::json!("abcd"));
+               match TryInto::<Transaction>::try_into(response) {
+                       Err(e) => {
+                               assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
+                               assert_eq!(e.get_ref().unwrap().to_string(), "invalid transaction");
+                       }
+                       Ok(_) => panic!("Expected error"),
+               }
+       }
+
+       #[test]
+       fn into_tx_from_json_response_with_valid_tx_data_plain() {
+               let genesis_block = genesis_block(Network::Bitcoin);
+               let target_tx = genesis_block.txdata.get(0).unwrap();
+               let response = JsonResponse(serde_json::json!(encode::serialize_hex(&target_tx)));
+               match TryInto::<Transaction>::try_into(response) {
+                       Err(e) => panic!("Unexpected error: {:?}", e),
+                       Ok(tx) => assert_eq!(&tx, target_tx),
+               }
+       }
+
+       #[test]
+       fn into_tx_from_json_response_with_valid_tx_data_hex_field() {
+               let genesis_block = genesis_block(Network::Bitcoin);
+               let target_tx = genesis_block.txdata.get(0).unwrap();
+               let response = JsonResponse(serde_json::json!({"hex": encode::serialize_hex(&target_tx)}));
+               match TryInto::<Transaction>::try_into(response) {
+                       Err(e) => panic!("Unexpected error: {:?}", e),
+                       Ok(tx) => assert_eq!(&tx, target_tx),
+               }
+       }
+
+       // transaction in hex field of JSON object
+
+       #[test]
+       fn into_tx_from_json_response_with_no_hex_field() {
+               let response = JsonResponse(serde_json::json!({ "error": "foo" }));
+               match TryInto::<Transaction>::try_into(response) {
+                       Err(e) => {
+                               assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
+                               assert_eq!(
+                                       e.get_ref().unwrap().to_string(),
+                                       "expected JSON string"
+                               );
+                       }
+                       Ok(_) => panic!("Expected error"),
+               }
+       }
+
+       #[test]
+       fn into_tx_from_json_response_not_signed() {
+               let response = JsonResponse(serde_json::json!({ "hex": "foo", "complete": false }));
+               match TryInto::<Transaction>::try_into(response) {
+                       Err(e) => {
+                               assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
+                               assert!(
+                                       e.get_ref().unwrap().to_string().contains(
+                                       "transaction couldn't be signed")
+                               );
+                       }
+                       Ok(_) => panic!("Expected error"),
+               }
+       }
 }
index 0721babfde3d1b626051ba1ccccb7240d1f5a5a7..a3935edf5cb6e9880217196112d8f4ead5bacd57 100644 (file)
@@ -27,9 +27,10 @@ const TCP_STREAM_TIMEOUT: Duration = Duration::from_secs(5);
 
 /// Timeout for reading the first byte of a response. This is separate from the general read
 /// timeout as it is not uncommon for Bitcoin Core to be blocked waiting on UTXO cache flushes for
-/// upwards of a minute or more. Note that we always retry once when we time out, so the maximum
-/// time we allow Bitcoin Core to block for is twice this value.
-const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(120);
+/// upwards of 10 minutes on slow devices (e.g. RPis with SSDs over USB). Note that we always retry
+/// once when we time out, so the maximum time we allow Bitcoin Core to block for is twice this
+/// value.
+const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(300);
 
 /// Maximum HTTP message header size in bytes.
 const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192;
index baa9a79c5d5af3a055fdbc1d7894dccb47338e32..1169d9283cb67ceae0635d9a66e1a6104194d774 100644 (file)
@@ -1,7 +1,7 @@
 [package]
 name = "lightning-invoice"
 description = "Data structures to parse and serialize BOLT11 lightning invoices"
-version = "0.8.0"
+version = "0.9.0"
 authors = ["Sebastian Geisler <sgeisler@wh2.tu-dresden.de>"]
 documentation = "https://docs.rs/lightning-invoice/"
 license = "MIT OR Apache-2.0"
@@ -10,11 +10,11 @@ readme = "README.md"
 
 [dependencies]
 bech32 = "0.8"
-lightning = { version = "0.0.100", path = "../lightning" }
+lightning = { version = "0.0.101", path = "../lightning" }
 secp256k1 = { version = "0.20", features = ["recovery"] }
 num-traits = "0.2.8"
 bitcoin_hashes = "0.10"
 
 [dev-dependencies]
 hex = "0.3"
-lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.101", path = "../lightning", features = ["_test_utils"] }
index 053766e10f3f7b164d779739c8231b81c87d8817..ad13c8040f24204745eceae06fa2360fd4ab7f39 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-net-tokio"
-version = "0.0.100"
+version = "0.0.101"
 authors = ["Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/rust-bitcoin/rust-lightning/"
@@ -12,7 +12,7 @@ edition = "2018"
 
 [dependencies]
 bitcoin = "0.27"
-lightning = { version = "0.0.100", path = "../lightning" }
+lightning = { version = "0.0.101", path = "../lightning" }
 tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
index cc585b9c0e8c0df612120c63322ca57c66ca094e..ded73551e75c4254c9ef646eb075d488e8c5a9ae 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-persister"
-version = "0.0.100"
+version = "0.0.101"
 authors = ["Valentine Wallace", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/rust-bitcoin/rust-lightning/"
@@ -13,11 +13,11 @@ unstable = ["lightning/unstable"]
 
 [dependencies]
 bitcoin = "0.27"
-lightning = { version = "0.0.100", path = "../lightning" }
+lightning = { version = "0.0.101", path = "../lightning" }
 libc = "0.2"
 
 [target.'cfg(windows)'.dependencies]
 winapi = { version = "0.3", features = ["winbase"] }
 
 [dev-dependencies]
-lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.101", path = "../lightning", features = ["_test_utils"] }
index d68a06d71b4fe58ec1af2bcb1117df8f1dddd7e5..6cfa540cf1ec9c32a89a13bf1ba995e8dd3808f5 100644 (file)
@@ -182,11 +182,11 @@ mod tests {
        use bitcoin::Txid;
        use lightning::chain::channelmonitor::{Persist, ChannelMonitorUpdateErr};
        use lightning::chain::transaction::OutPoint;
-       use lightning::{check_closed_broadcast, check_added_monitors};
+       use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
        use lightning::ln::features::InitFeatures;
        use lightning::ln::functional_test_utils::*;
        use lightning::ln::msgs::ErrorAction;
-       use lightning::util::events::{MessageSendEventsProvider, MessageSendEvent};
+       use lightning::util::events::{ClosureReason, Event, MessageSendEventsProvider, MessageSendEvent};
        use lightning::util::test_utils;
        use std::fs;
        #[cfg(target_os = "windows")]
@@ -259,6 +259,7 @@ mod tests {
                // Force close because cooperative close doesn't result in any persisted
                // updates.
                nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
 
@@ -268,6 +269,7 @@ mod tests {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
                check_closed_broadcast!(nodes[1], true);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                check_added_monitors!(nodes[1], 1);
 
                // Make sure everything is persisted as expected after close.
@@ -291,6 +293,7 @@ mod tests {
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
                nodes[1].node.force_close_channel(&chan.2).unwrap();
+               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 
                // Set the persister's directory to read-only, which should result in
@@ -327,6 +330,7 @@ mod tests {
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
                nodes[1].node.force_close_channel(&chan.2).unwrap();
+               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 
                // Create the persister with an invalid directory name and test that the
index 1ef4c74f991108778aabe77d6fbc633bd1314ba0..9d6217b04746f81c4ecd38fa9d5090b40feb4aff 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning"
-version = "0.0.100"
+version = "0.0.101"
 authors = ["Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/rust-bitcoin/rust-lightning/"
index 101ad6652007ff46373fd877a066efa1fd111992..0d3f87645ce4c4c1250d2d42a3a085f419ac13a6 100644 (file)
@@ -150,7 +150,7 @@ where C::Target: chain::Filter,
        ///
        /// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
        /// inclusion in the return value.
-       pub fn get_claimable_balances(&self, ignored_channels: &[ChannelDetails]) -> Vec<Balance> {
+       pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec<Balance> {
                let mut ret = Vec::new();
                let monitors = self.monitors.read().unwrap();
                for (_, monitor) in monitors.iter().filter(|(funding_outpoint, _)| {
index 413d184b598c1c01564c3f3fb9350f402b87badf..b89c58646fbfd960ccf98a5736fc4d300b26db8f 100644 (file)
@@ -190,8 +190,8 @@ pub enum MonitorEvent {
        /// A monitor event containing an HTLCUpdate.
        HTLCEvent(HTLCUpdate),
 
-       /// A monitor event that the Channel's commitment transaction was broadcasted.
-       CommitmentTxBroadcasted(OutPoint),
+       /// A monitor event that the Channel's commitment transaction was confirmed.
+       CommitmentTxConfirmed(OutPoint),
 }
 
 /// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on
@@ -918,7 +918,7 @@ impl<Signer: Sign> Writeable for ChannelMonitorImpl<Signer> {
                                        0u8.write(writer)?;
                                        upd.write(writer)?;
                                },
-                               MonitorEvent::CommitmentTxBroadcasted(_) => 1u8.write(writer)?
+                               MonitorEvent::CommitmentTxConfirmed(_) => 1u8.write(writer)?
                        }
                }
 
@@ -1787,7 +1787,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                        log_info!(logger, "Broadcasting local {}", log_tx!(tx));
                        broadcaster.broadcast_transaction(tx);
                }
-               self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
+               self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
        }
 
        pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), MonitorUpdateError>
@@ -1835,7 +1835,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        } else if !self.holder_tx_signed {
                                                log_error!(logger, "You have a toxic holder commitment transaction avaible in channel monitor, read comment in ChannelMonitor::get_latest_holder_commitment_txn to be informed of manual action to take");
                                        } else {
-                                               // If we generated a MonitorEvent::CommitmentTxBroadcasted, the ChannelManager
+                                               // If we generated a MonitorEvent::CommitmentTxConfirmed, the ChannelManager
                                                // will still give us a ChannelForceClosed event with !should_broadcast, but we
                                                // shouldn't print the scary warning above.
                                                log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction.");
@@ -2357,7 +2357,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                        let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone());
                        let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), false, self.best_block.height());
                        claimable_outpoints.push(commitment_package);
-                       self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
+                       self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
                        let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
                        self.holder_tx_signed = true;
                        // Because we're broadcasting a commitment transaction, we should construct the package
@@ -3110,7 +3110,7 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
                for _ in 0..pending_monitor_events_len {
                        let ev = match <u8 as Readable>::read(reader)? {
                                0 => MonitorEvent::HTLCEvent(Readable::read(reader)?),
-                               1 => MonitorEvent::CommitmentTxBroadcasted(funding_info.0),
+                               1 => MonitorEvent::CommitmentTxConfirmed(funding_info.0),
                                _ => return Err(DecodeError::InvalidValue)
                        };
                        pending_monitor_events.push(ev);
index cec09459233daef7b9c38582e17f984d10fa0d6d..718990c4557bc30676179e13bc3afecbbec07e06 100644 (file)
@@ -203,6 +203,9 @@ pub trait Watch<ChannelSigner: Sign> {
        /// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means
        /// calling [`block_connected`] and [`block_disconnected`] on the monitor.
        ///
+       /// Note: this interface MUST error with `ChannelMonitorUpdateErr::PermanentFailure` if
+       /// the given `funding_txo` has previously been registered via `watch_channel`.
+       ///
        /// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch
        /// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected
        /// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected
index 6901cc2f6394de9a5270895c713f659cd81afa8f..30088f2eba6815256990d634afa9e7ec1f2c5d53 100644 (file)
@@ -28,7 +28,7 @@ use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
 use routing::router::get_route;
 use util::config::UserConfig;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
 use util::errors::APIError;
 use util::ser::{ReadableArgs, Writeable};
 use util::test_utils::TestBroadcaster;
@@ -78,9 +78,10 @@ fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
        };
 
        // TODO: Once we hit the chain with the failure transaction we should check that we get a
-       // PaymentFailed event
+       // PaymentPathFailed event
 
        assert_eq!(nodes[0].node.list_channels().len(), 0);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
 
 #[test]
@@ -266,9 +267,10 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
        check_closed_broadcast!(nodes[0], true);
 
        // TODO: Once we hit the chain with the failure transaction we should check that we get a
-       // PaymentFailed event
+       // PaymentPathFailed event
 
        assert_eq!(nodes[0].node.list_channels().len(), 0);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 }
 
 #[test]
@@ -1820,7 +1822,7 @@ fn test_monitor_update_on_pending_forwards() {
 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
-       if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+       if let Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } = events[0] {
                assert_eq!(payment_hash, payment_hash_1);
                assert!(rejected_by_dest);
        } else { panic!("Unexpected event!"); }
@@ -1989,6 +1991,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
        close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -2614,6 +2618,8 @@ fn test_temporary_error_during_shutdown() {
        assert_eq!(txn_a, txn_b);
        assert_eq!(txn_a.len(), 1);
        check_spends!(txn_a[0], funding_tx);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -2634,6 +2640,7 @@ fn test_permanent_error_during_sending_shutdown() {
        assert!(nodes[0].node.close_channel(&channel_id).is_ok());
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 2);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
 
 #[test]
@@ -2656,6 +2663,7 @@ fn test_permanent_error_during_handling_shutdown() {
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &shutdown);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 2);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
 
 #[test]
index 858e307b77e429a6c992c0759bcdf751148ac251..4a6e4eb656b6f6fa2f9a5076814853d8ac77f9ea 100644 (file)
@@ -26,7 +26,7 @@ use ln::{PaymentPreimage, PaymentHash};
 use ln::features::{ChannelFeatures, InitFeatures};
 use ln::msgs;
 use ln::msgs::{DecodeError, OptionalField, DataLossProtect};
-use ln::script::ShutdownScript;
+use ln::script::{self, ShutdownScript};
 use ln::channelmanager::{CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
 use ln::chan_utils;
@@ -44,7 +44,6 @@ use util::scid_utils::scid_from_parts;
 use io;
 use prelude::*;
 use core::{cmp,mem,fmt};
-use core::convert::TryFrom;
 use core::ops::Deref;
 #[cfg(any(test, feature = "fuzztarget", debug_assertions))]
 use sync::Mutex;
@@ -555,21 +554,24 @@ pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
 /// it's 2^24.
 pub const MAX_FUNDING_SATOSHIS: u64 = 1 << 24;
 
-/// Maximum counterparty `dust_limit_satoshis` allowed. 2 * standard dust threshold on p2wsh output
-/// Scales up on Bitcoin Core's proceeding policy with dust outputs. A typical p2wsh output is 43
-/// bytes to which Core's `GetDustThreshold()` sums up a minimal spend of 67 bytes (even if
-/// a p2wsh witnessScript might be *effectively* smaller), `dustRelayFee` is set to 3000sat/kb, thus
-/// 110 * 3000 / 1000 = 330. Per-protocol rules, all time-sensitive outputs are p2wsh, a value of
-/// 330 sats is the lower bound desired to ensure good propagation of transactions. We give a bit
-/// of margin to our counterparty and pick up 660 satoshis as an accepted `dust_limit_satoshis`
-/// upper bound to avoid negotiation conflicts with other implementations.
-pub const MAX_DUST_LIMIT_SATOSHIS: u64 = 2 * 330;
-
-/// A typical p2wsh output is 43 bytes to which Core's `GetDustThreshold()` sums up a minimal
-/// spend of 67 bytes (even if a p2wsh witnessScript might be *effectively* smaller), `dustRelayFee`
-/// is set to 3000sat/kb, thus 110 * 3000 / 1000 = 330. Per-protocol rules, all time-sensitive outputs
-/// are p2wsh, a value of 330 sats is the lower bound desired to ensure good propagation of transactions.
-pub const MIN_DUST_LIMIT_SATOSHIS: u64 = 330;
+/// The maximum network dust limit for standard script formats. This currently represents the
+/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
+/// transaction non-standard and thus refuses to relay it.
+/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
+/// implementations use this value for their dust limit today.
+pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
+
+/// The maximum channel dust limit we will accept from our counterparty.
+pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
+
+/// The dust limit is used for both the commitment transaction outputs as well as the closing
+/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
+/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
+/// In order to avoid having to concern ourselves with standardness during the closing process, we
+/// simply require our counterparty to use a dust limit which will leave any segwit output
+/// standard.
+/// See https://github.com/lightningnetwork/lightning-rfc/issues/905 for more details.
+pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
 
 /// Used to return a simple Error back to ChannelManager. Will get converted to a
 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
@@ -636,7 +638,7 @@ impl<Signer: Sign> Channel<Signer> {
                        return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
                }
                let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis);
-               if holder_selected_channel_reserve_satoshis < MIN_DUST_LIMIT_SATOSHIS {
+               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
                }
 
@@ -707,7 +709,7 @@ impl<Signer: Sign> Channel<Signer> {
 
                        feerate_per_kw: feerate,
                        counterparty_dust_limit_satoshis: 0,
-                       holder_dust_limit_satoshis: MIN_DUST_LIMIT_SATOSHIS,
+                       holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
                        counterparty_max_htlc_value_in_flight_msat: 0,
                        counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
                        counterparty_htlc_minimum_msat: 0,
@@ -841,11 +843,11 @@ impl<Signer: Sign> Channel<Signer> {
                if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
                        return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs)));
                }
-               if msg.dust_limit_satoshis < MIN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_DUST_LIMIT_SATOSHIS)));
+               if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
-               if msg.dust_limit_satoshis >  MAX_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_DUST_LIMIT_SATOSHIS)));
+               if msg.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
                }
 
                // Convert things into internal flags and prep our state:
@@ -862,11 +864,11 @@ impl<Signer: Sign> Channel<Signer> {
                let background_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
 
                let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis);
-               if holder_selected_channel_reserve_satoshis < MIN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_DUST_LIMIT_SATOSHIS)));
+               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
-               if msg.channel_reserve_satoshis < MIN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is smaller than our dust limit ({})", msg.channel_reserve_satoshis, MIN_DUST_LIMIT_SATOSHIS)));
+               if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is smaller than our dust limit ({})", msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
                if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
                        return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
@@ -893,10 +895,10 @@ impl<Signer: Sign> Channel<Signer> {
                                        if script.len() == 0 {
                                                None
                                        } else {
-                                               match ShutdownScript::try_from((script.clone(), their_features)) {
-                                                       Ok(shutdown_script) => Some(shutdown_script.into_inner()),
-                                                       Err(_) => return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))),
+                                               if !script::is_bolt2_compliant(&script, their_features) {
+                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
                                                }
+                                               Some(script.clone())
                                        }
                                },
                                // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
@@ -971,7 +973,7 @@ impl<Signer: Sign> Channel<Signer> {
                        feerate_per_kw: msg.feerate_per_kw,
                        channel_value_satoshis: msg.funding_satoshis,
                        counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
-                       holder_dust_limit_satoshis: MIN_DUST_LIMIT_SATOSHIS,
+                       holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
                        counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
                        counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
                        counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
@@ -1615,11 +1617,11 @@ impl<Signer: Sign> Channel<Signer> {
                if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
                        return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs)));
                }
-               if msg.dust_limit_satoshis < MIN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_DUST_LIMIT_SATOSHIS)));
+               if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
-               if msg.dust_limit_satoshis > MAX_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_DUST_LIMIT_SATOSHIS)));
+               if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
                }
                if msg.minimum_depth > config.peer_channel_config_limits.max_minimum_depth {
                        return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", config.peer_channel_config_limits.max_minimum_depth, msg.minimum_depth)));
@@ -1638,10 +1640,10 @@ impl<Signer: Sign> Channel<Signer> {
                                        if script.len() == 0 {
                                                None
                                        } else {
-                                               match ShutdownScript::try_from((script.clone(), their_features)) {
-                                                       Ok(shutdown_script) => Some(shutdown_script.into_inner()),
-                                                       Err(_) => return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))),
+                                               if !script::is_bolt2_compliant(&script, their_features) {
+                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
                                                }
+                                               Some(script.clone())
                                        }
                                },
                                // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
@@ -3491,17 +3493,16 @@ impl<Signer: Sign> Channel<Signer> {
                }
                assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
-               let shutdown_scriptpubkey = match ShutdownScript::try_from((msg.scriptpubkey.clone(), their_features)) {
-                       Ok(script) => script.into_inner(),
-                       Err(_) => return Err(ChannelError::Close(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex()))),
-               };
+               if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
+                       return Err(ChannelError::Close(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
+               }
 
                if self.counterparty_shutdown_scriptpubkey.is_some() {
-                       if Some(&shutdown_scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() {
-                               return Err(ChannelError::Close(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", shutdown_scriptpubkey.to_bytes().to_hex())));
+                       if Some(&msg.scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() {
+                               return Err(ChannelError::Close(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
                        }
                } else {
-                       self.counterparty_shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+                       self.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
                }
 
                // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
@@ -3628,6 +3629,12 @@ impl<Signer: Sign> Channel<Signer> {
                        },
                };
 
+               for outp in closing_tx.trust().built_transaction().output.iter() {
+                       if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
+                               return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
+                       }
+               }
+
                assert!(self.shutdown_scriptpubkey.is_some());
                if let Some((last_fee, sig)) = self.last_sent_closing_fee {
                        if last_fee == msg.fee_satoshis {
@@ -5509,7 +5516,7 @@ mod tests {
        use bitcoin::hashes::hex::FromHex;
        use hex;
        use ln::{PaymentPreimage, PaymentHash};
-       use ln::channelmanager::{HTLCSource, MppId};
+       use ln::channelmanager::{HTLCSource, PaymentId};
        use ln::channel::{Channel,InboundHTLCOutput,OutboundHTLCOutput,InboundHTLCState,OutboundHTLCState,HTLCOutputInCommitment,HTLCCandidate,HTLCInitiator,TxCreationKeys};
        use ln::channel::MAX_FUNDING_SATOSHIS;
        use ln::features::InitFeatures;
@@ -5683,7 +5690,7 @@ mod tests {
                                path: Vec::new(),
                                session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
                                first_hop_htlc_msat: 548,
-                               mpp_id: MppId([42; 32]),
+                               payment_id: PaymentId([42; 32]),
                        }
                });
 
index 43fd8f1de7babbacf1aa10af222d8cf31b2ce2db..1b5813a301851229fdd2967dc22e025042b2cab8 100644 (file)
@@ -52,7 +52,7 @@ use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
 use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use util::{byte_utils, events};
 use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
@@ -172,20 +172,20 @@ struct ClaimableHTLC {
        onion_payload: OnionPayload,
 }
 
-/// A payment identifier used to correlate an MPP payment's per-path HTLC sources internally.
+/// A payment identifier used to uniquely identify a payment to LDK.
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub(crate) struct MppId(pub [u8; 32]);
+pub struct PaymentId(pub [u8; 32]);
 
-impl Writeable for MppId {
+impl Writeable for PaymentId {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.0.write(w)
        }
 }
 
-impl Readable for MppId {
+impl Readable for PaymentId {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let buf: [u8; 32] = Readable::read(r)?;
-               Ok(MppId(buf))
+               Ok(PaymentId(buf))
        }
 }
 /// Tracks the inbound corresponding to an outbound HTLC
@@ -198,7 +198,7 @@ pub(crate) enum HTLCSource {
                /// Technically we can recalculate this from the route, but we cache it here to avoid
                /// doing a double-pass on route when we get a failure back
                first_hop_htlc_msat: u64,
-               mpp_id: MppId,
+               payment_id: PaymentId,
        },
 }
 #[cfg(test)]
@@ -208,7 +208,7 @@ impl HTLCSource {
                        path: Vec::new(),
                        session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
                        first_hop_htlc_msat: 0,
-                       mpp_id: MppId([2; 32]),
+                       payment_id: PaymentId([2; 32]),
                }
        }
 }
@@ -242,6 +242,7 @@ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource
 
 struct MsgHandleErrInternal {
        err: msgs::LightningError,
+       chan_id: Option<[u8; 32]>, // If Some a channel of ours has been closed
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
 }
 impl MsgHandleErrInternal {
@@ -257,6 +258,7 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
+                       chan_id: None,
                        shutdown_finish: None,
                }
        }
@@ -267,12 +269,13 @@ impl MsgHandleErrInternal {
                                err,
                                action: msgs::ErrorAction::IgnoreError,
                        },
+                       chan_id: None,
                        shutdown_finish: None,
                }
        }
        #[inline]
        fn from_no_close(err: msgs::LightningError) -> Self {
-               Self { err, shutdown_finish: None }
+               Self { err, chan_id: None, shutdown_finish: None }
        }
        #[inline]
        fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
@@ -286,6 +289,7 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
+                       chan_id: Some(channel_id),
                        shutdown_finish: Some((shutdown_res, channel_update)),
                }
        }
@@ -320,6 +324,7 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
+                       chan_id: None,
                        shutdown_finish: None,
                }
        }
@@ -395,6 +400,65 @@ struct PendingInboundPayment {
        min_value_msat: Option<u64>,
 }
 
+/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
+/// and later, also stores information for retrying the payment.
+pub(crate) enum PendingOutboundPayment {
+       Legacy {
+               session_privs: HashSet<[u8; 32]>,
+       },
+       Retryable {
+               session_privs: HashSet<[u8; 32]>,
+               payment_hash: PaymentHash,
+               payment_secret: Option<PaymentSecret>,
+               pending_amt_msat: u64,
+               /// The total payment amount across all paths, used to verify that a retry is not overpaying.
+               total_msat: u64,
+               /// Our best known block height at the time this payment was initiated.
+               starting_block_height: u32,
+       },
+}
+
+impl PendingOutboundPayment {
+       fn remove(&mut self, session_priv: &[u8; 32], part_amt_msat: u64) -> bool {
+               let remove_res = match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } => {
+                               session_privs.remove(session_priv)
+                       }
+               };
+               if remove_res {
+                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. } = self {
+                               *pending_amt_msat -= part_amt_msat;
+                       }
+               }
+               remove_res
+       }
+
+       fn insert(&mut self, session_priv: [u8; 32], part_amt_msat: u64) -> bool {
+               let insert_res = match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } => {
+                               session_privs.insert(session_priv)
+                       }
+               };
+               if insert_res {
+                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. } = self {
+                               *pending_amt_msat += part_amt_msat;
+                       }
+               }
+               insert_res
+       }
+
+       fn remaining_parts(&self) -> usize {
+               match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } => {
+                               session_privs.len()
+                       }
+               }
+       }
+}
+
 /// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
@@ -481,20 +545,19 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// Locked *after* channel_state.
        pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
 
-       /// The session_priv bytes of outbound payments which are pending resolution.
+       /// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
        /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
        /// (if the channel has been force-closed), however we track them here to prevent duplicative
-       /// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative
+       /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
        /// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
        /// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
        /// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
        /// after reloading from disk while replaying blocks against ChannelMonitors.
        ///
-       /// Each payment has each of its MPP part's session_priv bytes in the HashSet of the map (even
-       /// payments over a single path).
+       /// See `PendingOutboundPayment` documentation for more info.
        ///
        /// Locked *after* channel_state.
-       pending_outbound_payments: Mutex<HashMap<MppId, HashSet<[u8; 32]>>>,
+       pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
 
        our_network_key: SecretKey,
        our_network_pubkey: PublicKey,
@@ -813,12 +876,13 @@ macro_rules! handle_error {
        ($self: ident, $internal: expr, $counterparty_node_id: expr) => {
                match $internal {
                        Ok(msg) => Ok(msg),
-                       Err(MsgHandleErrInternal { err, shutdown_finish }) => {
+                       Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
                                #[cfg(debug_assertions)]
                                {
                                        // In testing, ensure there are no deadlocks where the lock is already held upon
                                        // entering the macro.
                                        assert!($self.channel_state.try_lock().is_ok());
+                                       assert!($self.pending_events.try_lock().is_ok());
                                }
 
                                let mut msg_events = Vec::with_capacity(2);
@@ -830,6 +894,9 @@ macro_rules! handle_error {
                                                        msg: update
                                                });
                                        }
+                                       if let Some(channel_id) = chan_id {
+                                               $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id,  reason: ClosureReason::ProcessingError { err: err.err.clone() } });
+                                       }
                                }
 
                                log_error!($self.logger, "{}", err.err);
@@ -1363,6 +1430,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                msg: channel_update
                                                        });
                                                }
+                                               if let Ok(mut pending_events_lock) = self.pending_events.lock() {
+                                                       pending_events_lock.push(events::Event::ChannelClosed {
+                                                               channel_id: *channel_id,
+                                                               reason: ClosureReason::HolderForceClosed
+                                                       });
+                                               }
                                        }
                                        break Ok(());
                                },
@@ -1438,7 +1511,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
+       /// `peer_node_id` should be set when we receive a message from a peer, but not set when the
+       /// user closes, which will be re-exposed as the `ChannelClosed` reason.
+       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
@@ -1451,6 +1526,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if let Some(short_id) = chan.get().get_short_channel_id() {
                                        channel_state.short_to_id.remove(&short_id);
                                }
+                               let mut pending_events_lock = self.pending_events.lock().unwrap();
+                               if peer_node_id.is_some() {
+                                       if let Some(peer_msg) = peer_msg {
+                                               pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
+                                       }
+                               } else {
+                                       pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
+                               }
                                chan.remove_entry().1
                        } else {
                                return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
@@ -1472,7 +1555,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
        pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               match self.force_close_channel_with_peer(channel_id, None) {
+               match self.force_close_channel_with_peer(channel_id, None, None) {
                        Ok(counterparty_node_id) => {
                                self.channel_state.lock().unwrap().pending_msg_events.push(
                                        events::MessageSendEvent::HandleError {
@@ -1853,7 +1936,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        // Only public for testing, this should otherwise never be called direcly
-       pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, mpp_id: MppId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
+       pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
                log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
                let prng_seed = self.keys_manager.get_secure_random_bytes();
                let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
@@ -1868,9 +1951,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
-               let sessions = pending_outbounds.entry(mpp_id).or_insert(HashSet::new());
-               assert!(sessions.insert(session_priv_bytes));
 
                let err: Result<(), _> = loop {
                        let mut channel_lock = self.channel_state.lock().unwrap();
@@ -1888,12 +1968,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if !chan.get().is_live() {
                                                return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
                                        }
-                                       break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-                                               path: path.clone(),
-                                               session_priv: session_priv.clone(),
-                                               first_hop_htlc_msat: htlc_msat,
-                                               mpp_id,
-                                       }, onion_packet, &self.logger), channel_state, chan)
+                                       let send_res = break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+                                               htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+                                                       path: path.clone(),
+                                                       session_priv: session_priv.clone(),
+                                                       first_hop_htlc_msat: htlc_msat,
+                                                       payment_id,
+                                               }, onion_packet, &self.logger),
+                                       channel_state, chan);
+
+                                       let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+                                       let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable {
+                                               session_privs: HashSet::new(),
+                                               pending_amt_msat: 0,
+                                               payment_hash: *payment_hash,
+                                               payment_secret: *payment_secret,
+                                               starting_block_height: self.best_block.read().unwrap().height(),
+                                               total_msat: total_value,
+                                       });
+                                       assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat));
+
+                                       send_res
                                } {
                                        Some((update_add, commitment_signed, monitor_update)) => {
                                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
@@ -1972,11 +2067,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
        /// bit set (either as required or as available). If multiple paths are present in the Route,
        /// we assume the invoice had the basic_mpp feature set.
-       pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> {
-               self.send_payment_internal(route, payment_hash, payment_secret, None)
+       pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<PaymentId, PaymentSendFailure> {
+               self.send_payment_internal(route, payment_hash, payment_secret, None, None, None)
        }
 
-       fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>) -> Result<(), PaymentSendFailure> {
+       fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: Option<PaymentId>, recv_value_msat: Option<u64>) -> Result<PaymentId, PaymentSendFailure> {
                if route.paths.len() < 1 {
                        return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
                }
@@ -1992,7 +2087,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let mut total_value = 0;
                let our_node_id = self.get_our_node_id();
                let mut path_errs = Vec::with_capacity(route.paths.len());
-               let mpp_id = MppId(self.keys_manager.get_secure_random_bytes());
+               let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) };
                'path_check: for path in route.paths.iter() {
                        if path.len() < 1 || path.len() > 20 {
                                path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
@@ -2010,11 +2105,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                if path_errs.iter().any(|e| e.is_err()) {
                        return Err(PaymentSendFailure::PathParameterError(path_errs));
                }
+               if let Some(amt_msat) = recv_value_msat {
+                       debug_assert!(amt_msat >= total_value);
+                       total_value = amt_msat;
+               }
 
                let cur_height = self.best_block.read().unwrap().height() + 1;
                let mut results = Vec::new();
                for path in route.paths.iter() {
-                       results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, mpp_id, &keysend_preimage));
+                       results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage));
                }
                let mut has_ok = false;
                let mut has_err = false;
@@ -2034,10 +2133,58 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                } else if has_err {
                        Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
                } else {
-                       Ok(())
+                       Ok(payment_id)
                }
        }
 
+       /// Retries a payment along the given [`Route`].
+       ///
+       /// Errors returned are a superset of those returned from [`send_payment`], so see
+       /// [`send_payment`] documentation for more details on errors. This method will also error if the
+       /// retry amount puts the payment more than 10% over the payment's total amount, or if the payment
+       /// for the given `payment_id` cannot be found (likely due to timeout or success).
+       ///
+       /// [`send_payment`]: [`ChannelManager::send_payment`]
+       pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
+               const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
+               for path in route.paths.iter() {
+                       if path.len() == 0 {
+                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                       err: "length-0 path in route".to_string()
+                               }))
+                       }
+               }
+
+               let (total_msat, payment_hash, payment_secret) = {
+                       let outbounds = self.pending_outbound_payments.lock().unwrap();
+                       if let Some(payment) = outbounds.get(&payment_id) {
+                               match payment {
+                                       PendingOutboundPayment::Retryable {
+                                               total_msat, payment_hash, payment_secret, pending_amt_msat, ..
+                                       } => {
+                                               let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
+                                               if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
+                                                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                                               err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
+                                                       }))
+                                               }
+                                               (*total_msat, *payment_hash, *payment_secret)
+                                       },
+                                       PendingOutboundPayment::Legacy { .. } => {
+                                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                                       err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
+                                               }))
+                                       }
+                               }
+                       } else {
+                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                       err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
+                               }))
+                       }
+               };
+               return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ())
+       }
+
        /// Send a spontaneous payment, which is a payment that does not require the recipient to have
        /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
        /// the preimage, it must be a cryptographically secure random value that no intermediate node
@@ -2052,14 +2199,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Note that `route` must have exactly one path.
        ///
        /// [`send_payment`]: Self::send_payment
-       pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<PaymentHash, PaymentSendFailure> {
+       pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
                let preimage = match payment_preimage {
                        Some(p) => p,
                        None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
                };
                let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
-               match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) {
-                       Ok(()) => Ok(payment_hash),
+               match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) {
+                       Ok(payment_id) => Ok((payment_hash, payment_id)),
                        Err(e) => Err(e)
                }
        }
@@ -2416,6 +2563,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
+                                                                                       // ChannelClosed event is generated by handle_error for us.
                                                                                        Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
@@ -2849,27 +2997,25 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        self.fail_htlc_backwards_internal(channel_state,
                                                htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
                                },
-                               HTLCSource::OutboundRoute { session_priv, mpp_id, .. } => {
+                               HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                        let mut session_priv_bytes = [0; 32];
                                        session_priv_bytes.copy_from_slice(&session_priv[..]);
                                        let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-                                       if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) {
-                                               if sessions.get_mut().remove(&session_priv_bytes) {
+                                       if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+                                               if payment.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) {
                                                        self.pending_events.lock().unwrap().push(
-                                                               events::Event::PaymentFailed {
+                                                               events::Event::PaymentPathFailed {
                                                                        payment_hash,
                                                                        rejected_by_dest: false,
                                                                        network_update: None,
-                                                                       all_paths_failed: sessions.get().len() == 0,
+                                                                       all_paths_failed: payment.get().remaining_parts() == 0,
+                                                                       path: path.clone(),
                                                                        #[cfg(test)]
                                                                        error_code: None,
                                                                        #[cfg(test)]
                                                                        error_data: None,
                                                                }
                                                        );
-                                                       if sessions.get().len() == 0 {
-                                                               sessions.remove();
-                                                       }
                                                }
                                        } else {
                                                log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -2895,19 +3041,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                // from block_connected which may run during initialization prior to the chain_monitor
                // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
                match source {
-                       HTLCSource::OutboundRoute { ref path, session_priv, mpp_id, .. } => {
+                       HTLCSource::OutboundRoute { ref path, session_priv, payment_id, .. } => {
                                let mut session_priv_bytes = [0; 32];
                                session_priv_bytes.copy_from_slice(&session_priv[..]);
                                let mut outbounds = self.pending_outbound_payments.lock().unwrap();
                                let mut all_paths_failed = false;
-                               if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) {
-                                       if !sessions.get_mut().remove(&session_priv_bytes) {
+                               if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(payment_id) {
+                                       if !sessions.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) {
                                                log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                                return;
                                        }
-                                       if sessions.get().len() == 0 {
+                                       if sessions.get().remaining_parts() == 0 {
                                                all_paths_failed = true;
-                                               sessions.remove();
                                        }
                                } else {
                                        log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -2925,11 +3070,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                // process_onion_failure we should close that channel as it implies our
                                                // next-hop is needlessly blaming us!
                                                self.pending_events.lock().unwrap().push(
-                                                       events::Event::PaymentFailed {
+                                                       events::Event::PaymentPathFailed {
                                                                payment_hash: payment_hash.clone(),
                                                                rejected_by_dest: !payment_retryable,
                                                                network_update,
                                                                all_paths_failed,
+                                                               path: path.clone(),
 #[cfg(test)]
                                                                error_code: onion_error_code,
 #[cfg(test)]
@@ -2951,11 +3097,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                // TODO: For non-temporary failures, we really should be closing the
                                                // channel here as we apparently can't relay through them anyway.
                                                self.pending_events.lock().unwrap().push(
-                                                       events::Event::PaymentFailed {
+                                                       events::Event::PaymentPathFailed {
                                                                payment_hash: payment_hash.clone(),
                                                                rejected_by_dest: path.len() == 1,
                                                                network_update: None,
                                                                all_paths_failed,
+                                                               path: path.clone(),
 #[cfg(test)]
                                                                error_code: Some(*failure_code),
 #[cfg(test)]
@@ -3152,13 +3299,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
                match source {
-                       HTLCSource::OutboundRoute { session_priv, mpp_id, .. } => {
+                       HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                mem::drop(channel_state_lock);
                                let mut session_priv_bytes = [0; 32];
                                session_priv_bytes.copy_from_slice(&session_priv[..]);
                                let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-                               let found_payment = if let Some(mut sessions) = outbounds.remove(&mpp_id) {
-                                       sessions.remove(&session_priv_bytes)
+                               let found_payment = if let Some(mut sessions) = outbounds.remove(&payment_id) {
+                                       sessions.remove(&session_priv_bytes, path.last().unwrap().fee_msat)
                                } else { false };
                                if found_payment {
                                        let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
@@ -3406,7 +3553,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
                                        if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                       // monitor update contained within `shutdown_finish` was applied.
+                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                               shutdown_finish.0.take();
+                                                       }
+                                               }
+                                               return res
                                        }
                                        funding_tx
                                },
@@ -3549,6 +3705,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        msg: update
                                });
                        }
+                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id,  reason: ClosureReason::CooperativeClosure });
                }
                Ok(())
        }
@@ -3944,7 +4101,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                                        }
                                },
-                               MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+                               MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => {
                                        let mut channel_lock = self.channel_state.lock().unwrap();
                                        let channel_state = &mut *channel_lock;
                                        let by_id = &mut channel_state.by_id;
@@ -3960,6 +4117,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                msg: update
                                                        });
                                                }
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  reason: ClosureReason::CommitmentTxConfirmed });
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                        node_id: chan.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage {
@@ -4021,6 +4179,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        Err(e) => {
                                                let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                               // ChannelClosed event is generated by handle_error for us
                                                !close_channel
                                        }
                                }
@@ -4074,6 +4233,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                });
                                                        }
 
+                                                       if let Ok(mut pending_events_lock) = self.pending_events.lock() {
+                                                               pending_events_lock.push(events::Event::ChannelClosed {
+                                                                       channel_id: *channel_id,
+                                                                       reason: ClosureReason::CooperativeClosure
+                                                               });
+                                                       }
+
                                                        log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                        self.tx_broadcaster.broadcast_transaction(&tx);
                                                        false
@@ -4226,6 +4392,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.process_pending_events(&event_handler);
                events.into_inner()
        }
+
+       #[cfg(test)]
+       pub fn has_pending_payments(&self) -> bool {
+               !self.pending_outbound_payments.lock().unwrap().is_empty()
+       }
 }
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
@@ -4401,6 +4572,16 @@ where
                payment_secrets.retain(|_, inbound_payment| {
                        inbound_payment.expiry_time > header.time as u64
                });
+
+               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+               outbounds.retain(|_, payment| {
+                       const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
+                       if payment.remaining_parts() != 0 { return true }
+                       if let PendingOutboundPayment::Retryable { starting_block_height, .. } = payment {
+                               return *starting_block_height + PAYMENT_EXPIRY_BLOCKS > height
+                       }
+                       true
+               });
        }
 
        fn get_relevant_txids(&self) -> Vec<Txid> {
@@ -4494,6 +4675,7 @@ where
                                                        msg: update
                                                });
                                        }
+                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(),  reason: ClosureReason::CommitmentTxConfirmed });
                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                node_id: channel.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage { msg: e },
@@ -4684,6 +4866,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                                msg: update
                                                        });
                                                }
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  reason: ClosureReason::DisconnectedPeer });
                                                false
                                        } else {
                                                true
@@ -4698,6 +4881,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        if let Some(short_id) = chan.get_short_channel_id() {
                                                                short_to_id.remove(&short_id);
                                                        }
+                                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  reason: ClosureReason::DisconnectedPeer });
                                                        return false;
                                                } else {
                                                        no_channels_remain = false;
@@ -4788,12 +4972,12 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        for chan in self.list_channels() {
                                if chan.counterparty.node_id == *counterparty_node_id {
                                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
+                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
                                }
                        }
                } else {
                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-                       let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id));
+                       let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
                }
        }
 }
@@ -5041,23 +5225,23 @@ impl Readable for HTLCSource {
                                let mut session_priv: ::util::ser::OptionDeserWrapper<SecretKey> = ::util::ser::OptionDeserWrapper(None);
                                let mut first_hop_htlc_msat: u64 = 0;
                                let mut path = Some(Vec::new());
-                               let mut mpp_id = None;
+                               let mut payment_id = None;
                                read_tlv_fields!(reader, {
                                        (0, session_priv, required),
-                                       (1, mpp_id, option),
+                                       (1, payment_id, option),
                                        (2, first_hop_htlc_msat, required),
                                        (4, path, vec_type),
                                });
-                               if mpp_id.is_none() {
-                                       // For backwards compat, if there was no mpp_id written, use the session_priv bytes
+                               if payment_id.is_none() {
+                                       // For backwards compat, if there was no payment_id written, use the session_priv bytes
                                        // instead.
-                                       mpp_id = Some(MppId(*session_priv.0.unwrap().as_ref()));
+                                       payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
                                }
                                Ok(HTLCSource::OutboundRoute {
                                        session_priv: session_priv.0.unwrap(),
                                        first_hop_htlc_msat: first_hop_htlc_msat,
                                        path: path.unwrap(),
-                                       mpp_id: mpp_id.unwrap(),
+                                       payment_id: payment_id.unwrap(),
                                })
                        }
                        1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
@@ -5069,12 +5253,12 @@ impl Readable for HTLCSource {
 impl Writeable for HTLCSource {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::io::Error> {
                match self {
-                       HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, mpp_id } => {
+                       HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
                                0u8.write(writer)?;
-                               let mpp_id_opt = Some(mpp_id);
+                               let payment_id_opt = Some(payment_id);
                                write_tlv_fields!(writer, {
                                        (0, session_priv, required),
-                                       (1, mpp_id_opt, option),
+                                       (1, payment_id_opt, option),
                                        (2, first_hop_htlc_msat, required),
                                        (4, path, vec_type),
                                 });
@@ -5119,6 +5303,20 @@ impl_writeable_tlv_based!(PendingInboundPayment, {
        (8, min_value_msat, required),
 });
 
+impl_writeable_tlv_based_enum!(PendingOutboundPayment,
+       (0, Legacy) => {
+               (0, session_privs, required),
+       },
+       (2, Retryable) => {
+               (0, session_privs, required),
+               (2, payment_hash, required),
+               (4, payment_secret, option),
+               (6, total_msat, required),
+               (8, pending_amt_msat, required),
+               (10, starting_block_height, required),
+       },
+;);
+
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
        where M::Target: chain::Watch<Signer>,
         T::Target: BroadcasterInterface,
@@ -5209,18 +5407,34 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
                // For backwards compat, write the session privs and their total length.
                let mut num_pending_outbounds_compat: u64 = 0;
-               for (_, outbounds) in pending_outbound_payments.iter() {
-                       num_pending_outbounds_compat += outbounds.len() as u64;
+               for (_, outbound) in pending_outbound_payments.iter() {
+                       num_pending_outbounds_compat += outbound.remaining_parts() as u64;
                }
                num_pending_outbounds_compat.write(writer)?;
-               for (_, outbounds) in pending_outbound_payments.iter() {
-                       for outbound in outbounds.iter() {
-                               outbound.write(writer)?;
+               for (_, outbound) in pending_outbound_payments.iter() {
+                       match outbound {
+                               PendingOutboundPayment::Legacy { session_privs } |
+                               PendingOutboundPayment::Retryable { session_privs, .. } => {
+                                       for session_priv in session_privs.iter() {
+                                               session_priv.write(writer)?;
+                                       }
+                               }
                        }
                }
 
+               // Encode without retry info for 0.0.101 compatibility.
+               let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+               for (id, outbound) in pending_outbound_payments.iter() {
+                       match outbound {
+                               PendingOutboundPayment::Legacy { session_privs } |
+                               PendingOutboundPayment::Retryable { session_privs, .. } => {
+                                       pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
+                               }
+                       }
+               }
                write_tlv_fields!(writer, {
-                       (1, pending_outbound_payments, required),
+                       (1, pending_outbound_payments_no_retry, required),
+                       (3, pending_outbound_payments, required),
                });
 
                Ok(())
@@ -5358,6 +5572,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
                let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut channel_closures = Vec::new();
                for _ in 0..channel_count {
                        let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
                        let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
@@ -5388,6 +5603,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+                                       channel_closures.push(events::Event::ChannelClosed {
+                                               channel_id: channel.channel_id(),
+                                               reason: ClosureReason::OutdatedChannelManager
+                                       });
                                } else {
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
                                                short_to_id.insert(short_channel_id, channel.channel_id());
@@ -5453,6 +5672,16 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                None => continue,
                        }
                }
+               if forward_htlcs_count > 0 {
+                       // If we have pending HTLCs to forward, assume we either dropped a
+                       // `PendingHTLCsForwardable` or the user received it but never processed it as they
+                       // shut down before the timer hit. Either way, set the time_forwardable to a small
+                       // constant as enough time has likely passed that we should simply handle the forwards
+                       // now, or at least after the user gets a chance to reconnect to our peers.
+                       pending_events_read.push(events::Event::PendingHTLCsForwardable {
+                               time_forwardable: Duration::from_secs(2),
+                       });
+               }
 
                let background_event_count: u64 = Readable::read(reader)?;
                let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
@@ -5475,26 +5704,42 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                }
 
                let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
-               let mut pending_outbound_payments_compat: HashMap<MppId, HashSet<[u8; 32]>> =
+               let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
                        HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
                for _ in 0..pending_outbound_payments_count_compat {
                        let session_priv = Readable::read(reader)?;
-                       if pending_outbound_payments_compat.insert(MppId(session_priv), [session_priv].iter().cloned().collect()).is_some() {
+                       let payment = PendingOutboundPayment::Legacy {
+                               session_privs: [session_priv].iter().cloned().collect()
+                       };
+                       if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
                                return Err(DecodeError::InvalidValue)
                        };
                }
 
+               // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
+               let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
                let mut pending_outbound_payments = None;
                read_tlv_fields!(reader, {
-                       (1, pending_outbound_payments, option),
+                       (1, pending_outbound_payments_no_retry, option),
+                       (3, pending_outbound_payments, option),
                });
-               if pending_outbound_payments.is_none() {
+               if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
                        pending_outbound_payments = Some(pending_outbound_payments_compat);
+               } else if pending_outbound_payments.is_none() {
+                       let mut outbounds = HashMap::new();
+                       for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
+                               outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
+                       }
+                       pending_outbound_payments = Some(outbounds);
                }
 
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
 
+               if !channel_closures.is_empty() {
+                       pending_events_read.append(&mut channel_closures);
+               }
+
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: args.fee_estimator,
@@ -5549,7 +5794,7 @@ mod tests {
        use bitcoin::hashes::sha256::Hash as Sha256;
        use core::time::Duration;
        use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-       use ln::channelmanager::{MppId, PaymentSendFailure};
+       use ln::channelmanager::{PaymentId, PaymentSendFailure};
        use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
        use ln::msgs;
@@ -5700,11 +5945,11 @@ mod tests {
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
                let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
                let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
-               let mpp_id = MppId([42; 32]);
+               let payment_id = PaymentId([42; 32]);
                // Use the utility function send_payment_along_path to send the payment with MPP data which
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
-               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -5734,7 +5979,7 @@ mod tests {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
 
                // Send the second half of the original MPP payment.
-               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -5831,7 +6076,7 @@ mod tests {
                // To start (2), send a keysend payment but don't claim it.
                let payment_preimage = PaymentPreimage([42; 32]);
                let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
-               let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -5890,7 +6135,7 @@ mod tests {
 
                let test_preimage = PaymentPreimage([42; 32]);
                let mismatch_payment_hash = PaymentHash([43; 32]);
-               let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap();
+               let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap();
                check_added_monitors!(nodes[0], 1);
 
                let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -5927,7 +6172,7 @@ mod tests {
                let test_preimage = PaymentPreimage([42; 32]);
                let test_secret = PaymentSecret([43; 32]);
                let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
-               let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap();
+               let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap();
                check_added_monitors!(nodes[0], 1);
 
                let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
index fe35fe65b82121a45438386ff54e12a830439540..a95d32476b4d3ead0a94e45bd42c5d6bda56f890 100644 (file)
@@ -743,16 +743,16 @@ macro_rules! get_closing_signed_broadcast {
 #[macro_export]
 macro_rules! check_closed_broadcast {
        ($node: expr, $with_error_msg: expr) => {{
-               let events = $node.node.get_and_clear_pending_msg_events();
-               assert_eq!(events.len(), if $with_error_msg { 2 } else { 1 });
-               match events[0] {
+               let msg_events = $node.node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), if $with_error_msg { 2 } else { 1 });
+               match msg_events[0] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                assert_eq!(msg.contents.flags & 2, 2);
                        },
                        _ => panic!("Unexpected event"),
                }
                if $with_error_msg {
-                       match events[1] {
+                       match msg_events[1] {
                                MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
                                        // TODO: Check node_id
                                        Some(msg.clone())
@@ -763,6 +763,24 @@ macro_rules! check_closed_broadcast {
        }}
 }
 
+/// Check that a channel's closing channel event has been issued
+#[macro_export]
+macro_rules! check_closed_event {
+       ($node: expr, $events: expr, $reason: expr) => {{
+               let events = $node.node.get_and_clear_pending_events();
+               assert_eq!(events.len(), $events);
+               let expected_reason = $reason;
+               for event in events {
+                       match event {
+                               Event::ChannelClosed { ref reason, .. } => {
+                                       assert_eq!(*reason, expected_reason);
+                               },
+                               _ => panic!("Unexpected event"),
+                       }
+               }
+       }}
+}
+
 pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
        let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
        let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) };
@@ -986,6 +1004,20 @@ macro_rules! expect_pending_htlcs_forwardable {
        }}
 }
 
+#[cfg(test)]
+macro_rules! expect_pending_htlcs_forwardable_from_events {
+       ($node: expr, $events: expr, $ignore: expr) => {{
+               assert_eq!($events.len(), 1);
+               match $events[0] {
+                       Event::PendingHTLCsForwardable { .. } => { },
+                       _ => panic!("Unexpected event"),
+               };
+               if $ignore {
+                       $node.node.process_pending_htlc_forwards();
+               }
+       }}
+}
+
 #[cfg(any(test, feature = "unstable"))]
 macro_rules! expect_payment_received {
        ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
@@ -1043,7 +1075,7 @@ macro_rules! expect_payment_failed_with_update {
                let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
-                       Event::PaymentFailed { ref payment_hash, rejected_by_dest, ref network_update, ref error_code, ref error_data, .. } => {
+                       Event::PaymentPathFailed { ref payment_hash, rejected_by_dest, ref network_update, ref error_code, ref error_data, .. } => {
                                assert_eq!(*payment_hash, $expected_payment_hash, "unexpected payment_hash");
                                assert_eq!(rejected_by_dest, $rejected_by_dest, "unexpected rejected_by_dest value");
                                assert!(error_code.is_some(), "expected error_code.is_some() = true");
@@ -1072,7 +1104,7 @@ macro_rules! expect_payment_failed {
                let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
-                       Event::PaymentFailed { ref payment_hash, rejected_by_dest, network_update: _, ref error_code, ref error_data, .. } => {
+                       Event::PaymentPathFailed { ref payment_hash, rejected_by_dest, network_update: _, ref error_code, ref error_data, .. } => {
                                assert_eq!(*payment_hash, $expected_payment_hash, "unexpected payment_hash");
                                assert_eq!(rejected_by_dest, $rejected_by_dest, "unexpected rejected_by_dest value");
                                assert!(error_code.is_some(), "expected error_code.is_some() = true");
@@ -1369,10 +1401,13 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe
                        let events = origin_node.node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 1);
                        match events[0] {
-                               Event::PaymentFailed { payment_hash, rejected_by_dest, all_paths_failed, .. } => {
+                               Event::PaymentPathFailed { payment_hash, rejected_by_dest, all_paths_failed, ref path, .. } => {
                                        assert_eq!(payment_hash, our_payment_hash);
                                        assert!(rejected_by_dest);
                                        assert_eq!(all_paths_failed, i == expected_paths.len() - 1);
+                                       for (idx, hop) in expected_route.iter().enumerate() {
+                                               assert_eq!(hop.node.get_our_node_id(), path[idx].pubkey);
+                                       }
                                },
                                _ => panic!("Unexpected event"),
                        }
index 08dc81e0206442c1b9263458ee6c3e3f54a3e55e..bb9556fc46a1003e334a990ad66efbcb7c1a4c4b 100644 (file)
@@ -19,7 +19,7 @@ use chain::transaction::OutPoint;
 use chain::keysinterface::BaseSign;
 use ln::{PaymentPreimage, PaymentSecret, PaymentHash};
 use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
-use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, MppId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
 use ln::channel::{Channel, ChannelError};
 use ln::{chan_utils, onion_utils};
 use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT;
@@ -30,7 +30,7 @@ use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::{byte_utils, test_utils};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
 use util::errors::APIError;
 use util::ser::{Writeable, ReadableArgs};
 use util::config::UserConfig;
@@ -638,6 +638,7 @@ fn test_update_fee_that_funder_cannot_afford() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
 }
 
 #[test]
@@ -738,6 +739,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -850,6 +853,8 @@ fn test_update_fee() {
        assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -977,10 +982,20 @@ fn fake_network_test() {
 
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -1176,6 +1191,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
 
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        // Check we only broadcast 1 timeout tx
@@ -1457,6 +1473,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
 }
 
 #[test]
@@ -1480,7 +1497,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known());
 
-       let dust_amt = crate::ln::channel::MIN_DUST_LIMIT_SATOSHIS * 1000
+       let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
                + feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000 - 1;
        // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
        // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
@@ -1583,6 +1600,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
 }
 
 #[test]
@@ -2039,6 +2057,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[0], true);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
 
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
@@ -2059,6 +2079,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        macro_rules! claim_funds {
                ($node: expr, $prev_node: expr, $preimage: expr) => {
@@ -2101,6 +2123,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
        // confusing us in the following tests.
@@ -2172,6 +2196,8 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[4].node.list_channels().len(), 0);
 
        nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 #[test]
@@ -2221,6 +2247,7 @@ fn test_justice_tx() {
                        node_txn.truncate(1);
                }
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
 
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -2228,6 +2255,7 @@ fn test_justice_tx() {
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                // Broadcast revoked HTLC-timeout on node 1
                mine_transaction(&nodes[1], &node_txn[1]);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
@@ -2269,9 +2297,11 @@ fn test_justice_tx() {
                test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
 
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                check_added_monitors!(nodes[1], 1);
                mine_transaction(&nodes[0], &node_txn[1]);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -2299,6 +2329,7 @@ fn revoked_output_claim() {
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
 
@@ -2308,7 +2339,8 @@ fn revoked_output_claim() {
        // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        get_announce_close_broadcast_events(&nodes, 0, 1);
-       check_added_monitors!(nodes[0], 1)
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 #[test]
@@ -2345,8 +2377,10 @@ fn claim_htlc_outputs_shared_tx() {
        {
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
 
@@ -2403,7 +2437,13 @@ fn claim_htlc_outputs_single_tx() {
                check_added_monitors!(nodes[0], 1);
                confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
                check_added_monitors!(nodes[1], 1);
-               expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               let mut events = nodes[0].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+               match events[1] {
+                       Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+                       _ => panic!("Unexpected event"),
+               }
 
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
@@ -2501,6 +2541,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 5);
        assert_eq!(node_txn[0], node_txn[3]);
@@ -2526,11 +2567,15 @@ fn test_htlc_on_chain_success() {
                added_monitors.clear();
        }
        let forwarded_events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(forwarded_events.len(), 2);
-       if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
-               } else { panic!(); }
+       assert_eq!(forwarded_events.len(), 3);
+       match forwarded_events[0] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
        if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
                } else { panic!(); }
+       if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[2] {
+               } else { panic!(); }
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        {
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
@@ -2597,6 +2642,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Sucess * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
        let commitment_spend =
@@ -2632,7 +2678,7 @@ fn test_htlc_on_chain_success() {
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 2);
+       assert_eq!(events.len(), 3);
        let mut first_claimed = false;
        for event in events {
                match event {
@@ -2645,6 +2691,7 @@ fn test_htlc_on_chain_success() {
                                        assert_eq!(payment_hash, payment_hash_2);
                                }
                        },
+                       Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
                        _ => panic!("Unexpected event"),
                }
        }
@@ -2701,6 +2748,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan_2.3);
@@ -2710,6 +2758,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
        connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
        mine_transaction(&nodes[1], &commitment_tx[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let timeout_tx;
        {
                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -2777,6 +2826,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx
        assert_eq!(node_txn.len(), 2);
        check_spends!(node_txn[0], chan_1.3);
@@ -2815,6 +2865,7 @@ fn test_simple_commitment_revoked_fail_backward() {
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
@@ -2964,15 +3015,19 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
+       assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
        match events[0] {
-               Event::PaymentFailed { ref payment_hash, .. } => {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
+               _ => panic!("Unexepected event"),
+       }
+       match events[1] {
+               Event::PaymentPathFailed { ref payment_hash, .. } => {
                        assert_eq!(*payment_hash, fourth_payment_hash);
                },
                _ => panic!("Unexpected event"),
        }
        if !deliver_bs_raa {
-               match events[1] {
+               match events[2] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
@@ -3022,7 +3077,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                        let events = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 3);
                        match events[0] {
-                               Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
+                               Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        // If we delivered B's RAA we got an unknown preimage error, not something
                                        // that we should update our routing table for.
@@ -3033,14 +3088,14 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                                _ => panic!("Unexpected event"),
                        }
                        match events[1] {
-                               Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
+                               Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        assert!(network_update.is_some());
                                },
                                _ => panic!("Unexpected event"),
                        }
                        match events[2] {
-                               Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
+                               Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        assert!(network_update.is_some());
                                },
@@ -3132,9 +3187,21 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
-
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
        // Check that Alice fails backward the pending HTLC from the second payment.
-       expect_payment_failed!(nodes[0], failed_payment_hash, true);
+       match events[0] {
+               Event::PaymentPathFailed { payment_hash, .. } => {
+                       assert_eq!(payment_hash, failed_payment_hash);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
+                       assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
+               },
+               _ => panic!("Unexpected event {:?}", events[1]),
+       }
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 }
@@ -3154,6 +3221,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3);
@@ -3163,6 +3231,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
@@ -3217,6 +3286,7 @@ fn test_force_close_fail_back() {
        nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
@@ -3231,6 +3301,7 @@ fn test_force_close_fail_back() {
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
@@ -3323,7 +3394,7 @@ fn test_simple_peer_disconnect() {
                        _ => panic!("Unexpected event"),
                }
                match events[1] {
-                       Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
+                       Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } => {
                                assert_eq!(payment_hash, payment_hash_5);
                                assert!(rejected_by_dest);
                        },
@@ -3891,8 +3962,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
                // Use the utility function send_payment_along_path to send the payment with MPP data which
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
-               let mpp_id = MppId([42; 32]);
-               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, mpp_id, &None).unwrap();
+               let payment_id = PaymentId([42; 32]);
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, payment_id, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -4126,7 +4197,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        //
        // If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
        // ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
-       // PaymentFailed event appearing). However, because we may not serialize the relevant
+       // PaymentPathFailed event appearing). However, because we may not serialize the relevant
        // ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
        // consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
        // and de-duplicates ChannelMonitor events.
@@ -4148,6 +4219,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -4165,6 +4237,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 
        header.prev_blockhash = nodes[0].best_block_hash();
@@ -4509,6 +4582,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
+       check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
 
        // nodes[1] and nodes[2] have no lost state with nodes[0]...
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -4572,6 +4646,7 @@ fn test_claim_sizeable_push_msat() {
        nodes[1].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@ -4600,6 +4675,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        nodes[0].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
@@ -4609,6 +4685,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        mine_transaction(&nodes[1], &node_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4636,6 +4713,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        mine_transaction(&nodes[1], &node_txn[0]);
@@ -4688,6 +4766,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        check_spends!(node_txn[2], node_txn[1]);
 
        mine_transaction(&nodes[1], &node_txn[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4732,6 +4811,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 
        mine_transaction(&nodes[1], &node_txn[1]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        expect_payment_failed!(nodes[1], our_payment_hash, true);
 
@@ -4762,6 +4842,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2);
@@ -4798,6 +4879,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -4813,6 +4895,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
@@ -4869,6 +4952,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 
        assert_eq!(revoked_htlc_txn.len(), 2);
@@ -4885,6 +4969,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
@@ -4965,6 +5050,7 @@ fn test_onchain_to_onchain_claim() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
        assert_eq!(c_txn.len(), 3);
@@ -4981,7 +5067,19 @@ fn test_onchain_to_onchain_claim() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
        check_added_monitors!(nodes[1], 1);
-       expect_payment_forwarded!(nodes[1], Some(1000), true);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+                       assert_eq!(fee_earned_msat, Some(1000));
+                       assert_eq!(claim_from_onchain_tx, true);
+               },
+               _ => panic!("Unexpected event"),
+       }
        {
                let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // ChannelMonitor: claim tx
@@ -4989,9 +5087,9 @@ fn test_onchain_to_onchain_claim() {
                check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
                b_txn.clear();
        }
+       check_added_monitors!(nodes[1], 1);
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
-       check_added_monitors!(nodes[1], 1);
        match msg_events[0] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
@@ -5013,6 +5111,7 @@ fn test_onchain_to_onchain_claim() {
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        mine_transaction(&nodes[1], &commitment_tx[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
        assert_eq!(b_txn.len(), 3);
@@ -5070,6 +5169,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        mine_transaction(&nodes[1], &commitment_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
 
        let htlc_timeout_tx;
@@ -5096,6 +5196,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        nodes[2].node.claim_funds(our_payment_preimage);
        mine_transaction(&nodes[2], &commitment_txn[0]);
        check_added_monitors!(nodes[2], 2);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -5184,6 +5285,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        check_added_monitors!(nodes[1], 1);
        mine_transaction(&nodes[1], &local_txn[0]);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -5354,9 +5456,26 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        } else {
                mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
        }
+       let events = nodes[2].node.get_and_clear_pending_events();
+       let close_event = if deliver_last_raa {
+               assert_eq!(events.len(), 2);
+               events[1].clone()
+       } else {
+               assert_eq!(events.len(), 1);
+               events[0].clone()
+       };
+       match close_event {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
+
        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
        check_closed_broadcast!(nodes[2], true);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       if deliver_last_raa {
+               expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+       } else {
+               expect_pending_htlcs_forwardable!(nodes[2]);
+       }
        check_added_monitors!(nodes[2], 3);
 
        let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
@@ -5405,7 +5524,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        let mut as_failds = HashSet::new();
        let mut as_updates = 0;
        for event in as_events.iter() {
-               if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
+               if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
                        assert!(as_failds.insert(*payment_hash));
                        if *payment_hash != payment_hash_2 {
                                assert_eq!(*rejected_by_dest, deliver_last_raa);
@@ -5430,7 +5549,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        let mut bs_failds = HashSet::new();
        let mut bs_updates = 0;
        for event in bs_events.iter() {
-               if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
+               if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
                        assert!(bs_failds.insert(*payment_hash));
                        if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
                                assert_eq!(*rejected_by_dest, deliver_last_raa);
@@ -5493,6 +5612,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        let htlc_timeout = {
@@ -5576,6 +5696,7 @@ fn test_key_derivation_params() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 
        let htlc_timeout = {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -5616,6 +5737,7 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        mine_transaction(&nodes[0], &closing_tx);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5623,6 +5745,7 @@ fn test_static_output_closing_tx() {
        check_spends!(spend_txn[0], closing_tx);
 
        mine_transaction(&nodes[1], &closing_tx);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -5674,6 +5797,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5706,6 +5830,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5754,6 +5879,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@ -5866,7 +5992,7 @@ fn bolt2_open_channel_sane_dust_limit() {
        let push_msat=10001;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
        let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       node0_to_1_send_open_channel.dust_limit_satoshis = 661;
+       node0_to_1_send_open_channel.dust_limit_satoshis = 547;
        node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
 
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel);
@@ -5877,7 +6003,7 @@ fn bolt2_open_channel_sane_dust_limit() {
                },
                _ => panic!("Unexpected event"),
        };
-       assert_eq!(err_msg.data, "dust_limit_satoshis (661) is greater than the implementation limit (660)");
+       assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
 }
 
 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
@@ -5949,7 +6075,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match &events[0] {
-               &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed } => {
+               &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
                        assert_eq!(our_payment_hash.clone(), *payment_hash);
                        assert_eq!(*rejected_by_dest, false);
                        assert_eq!(*all_paths_failed, true);
@@ -6036,7 +6162,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match &events[0] {
-               &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed } => {
+               &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
                        assert_eq!(payment_hash_2.clone(), *payment_hash);
                        assert_eq!(*rejected_by_dest, false);
                        assert_eq!(*all_paths_failed, true);
@@ -6294,6 +6420,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
 }
 
 #[test]
@@ -6421,6 +6548,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6457,6 +6585,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6501,6 +6630,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6526,6 +6656,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6551,6 +6682,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6600,6 +6732,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6633,6 +6766,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6666,6 +6800,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6699,6 +6834,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6740,6 +6876,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6781,6 +6918,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6829,6 +6967,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6971,16 +7110,17 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        let events = nodes[0].node.get_and_clear_pending_events();
-       // Only 2 PaymentFailed events should show up, over-dust HTLC has to be failed by timeout tx
+       // Only 2 PaymentPathFailed events should show up, over-dust HTLC has to be failed by timeout tx
        assert_eq!(events.len(), 2);
        let mut first_failed = false;
        for event in events {
                match event {
-                       Event::PaymentFailed { payment_hash, .. } => {
+                       Event::PaymentPathFailed { payment_hash, .. } => {
                                if payment_hash == payment_hash_1 {
                                        assert!(!first_failed);
                                        first_failed = true;
@@ -7031,6 +7171,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[0], dust_hash, true);
 
@@ -7050,6 +7191,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
@@ -7068,14 +7210,14 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                        assert_eq!(events.len(), 2);
                        let first;
                        match events[0] {
-                               Event::PaymentFailed { payment_hash, .. } => {
+                               Event::PaymentPathFailed { payment_hash, .. } => {
                                        if payment_hash == dust_hash { first = true; }
                                        else { first = false; }
                                },
                                _ => panic!("Unexpected event"),
                        }
                        match events[1] {
-                               Event::PaymentFailed { payment_hash, .. } => {
+                               Event::PaymentPathFailed { payment_hash, .. } => {
                                        if first { assert_eq!(payment_hash, non_dust_hash); }
                                        else { assert_eq!(payment_hash, dust_hash); }
                                },
@@ -7131,14 +7273,17 @@ fn test_user_configurable_csv_delay() {
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        accept_channel.to_self_delay = 200;
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+       let reason_msg;
        if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
                match action {
                        &ErrorAction::SendErrorMessage { ref msg } => {
                                assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
+                               reason_msg = msg.data.clone();
                        },
-                       _ => { assert!(false); }
+                       _ => { panic!(); }
                }
-       } else { assert!(false); }
+       } else { panic!(); }
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
 
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
@@ -7251,10 +7396,10 @@ fn test_data_loss_protect() {
 
        // Check we close channel detecting A is fallen-behind
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
        check_added_monitors!(nodes[1], 1);
 
-
        // Check A is able to claim to_remote output
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
@@ -7262,6 +7407,7 @@ fn test_data_loss_protect() {
        assert_eq!(node_txn[0].output.len(), 2);
        mine_transaction(&nodes[0], &node_txn[0]);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
        assert_eq!(spend_txn.len(), 1);
        check_spends!(spend_txn[0], node_txn[0]);
@@ -7704,6 +7850,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
 
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -7725,7 +7872,12 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
-       expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+       match events[1] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
        let first;
        let feerate_1;
        let penalty_txn;
@@ -7971,6 +8123,7 @@ fn test_counterparty_raa_skip_no_crash() {
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
 }
 
 #[test]
@@ -8003,6 +8156,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
@@ -8486,6 +8640,7 @@ fn test_pre_lockin_no_chan_closed_update() {
        let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() });
 }
 
 #[test]
@@ -8520,6 +8675,7 @@ fn test_htlc_no_detection() {
        chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
 
        let htlc_timeout = {
@@ -8579,6 +8735,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
+       check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                        true => alice_txn.clone(),
@@ -8590,6 +8747,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                assert_eq!(bob_txn.len(), 1);
                check_spends!(bob_txn[0], chan_ab.3);
@@ -8670,6 +8828,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                if broadcast_alice {
@@ -8885,6 +9044,7 @@ fn test_error_chans_closed() {
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
@@ -8894,6 +9054,7 @@ fn test_error_chans_closed() {
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
+       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
@@ -8958,6 +9119,7 @@ fn test_invalid_funding_tx() {
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
        confirm_transaction_at(&nodes[1], &tx, 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[1], 1);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@ -9001,6 +9163,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
        nodes[1].node.force_close_channel(&channel_id).unwrap();
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@ -9053,6 +9216,125 @@ fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
        do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
 }
 
+#[test]
+fn test_forwardable_regen() {
+       // Tests that if we reload a ChannelManager while forwards are pending we will regenerate the
+       // PendingHTLCsForwardable event automatically, ensuring we don't forget to forward/receive
+       // HTLCs.
+       // We test it for both payment receipt and payment forwarding.
+
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let persister: test_utils::TestPersister;
+       let new_chain_monitor: test_utils::TestChainMonitor;
+       let nodes_1_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+       // First send a payment to nodes[1]
+       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+
+       // Next send a payment which is forwarded by nodes[1]
+       let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000);
+       nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+       // There is already a PendingHTLCsForwardable event "pending" so another one will not be
+       // generated
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+       let nodes_1_serialized = nodes[1].node.encode();
+       let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+       let mut chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+       {
+               let monitors = nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap();
+               let mut monitor_iter = monitors.iter();
+               monitor_iter.next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+               monitor_iter.next().unwrap().1.write(&mut chan_1_monitor_serialized).unwrap();
+       }
+
+       persister = test_utils::TestPersister::new();
+       let keys_manager = &chanmon_cfgs[1].keys_manager;
+       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
+       nodes[1].chain_monitor = &new_chain_monitor;
+
+       let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+       let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+               &mut chan_0_monitor_read, keys_manager).unwrap();
+       assert!(chan_0_monitor_read.is_empty());
+       let mut chan_1_monitor_read = &chan_1_monitor_serialized.0[..];
+       let (_, mut chan_1_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+               &mut chan_1_monitor_read, keys_manager).unwrap();
+       assert!(chan_1_monitor_read.is_empty());
+
+       let mut nodes_1_read = &nodes_1_serialized[..];
+       let (_, nodes_1_deserialized_tmp) = {
+               let mut channel_monitors = HashMap::new();
+               channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+               channel_monitors.insert(chan_1_monitor.get_funding_txo().0, &mut chan_1_monitor);
+               <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_1_read, ChannelManagerReadArgs {
+                       default_config: UserConfig::default(),
+                       keys_manager,
+                       fee_estimator: node_cfgs[1].fee_estimator,
+                       chain_monitor: nodes[1].chain_monitor,
+                       tx_broadcaster: nodes[1].tx_broadcaster.clone(),
+                       logger: nodes[1].logger,
+                       channel_monitors,
+               }).unwrap()
+       };
+       nodes_1_deserialized = nodes_1_deserialized_tmp;
+       assert!(nodes_1_read.is_empty());
+
+       assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor).is_ok());
+       nodes[1].node = &nodes_1_deserialized;
+       check_added_monitors!(nodes[1], 2);
+
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       // Note that nodes[1] and nodes[2] resend their funding_locked here since they haven't updated
+       // the commitment state.
+       reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000);
+       check_added_monitors!(nodes[1], 1);
+
+       let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false);
+       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
+
+       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+       claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
+}
+
 #[test]
 fn test_keysend_payments_to_public_node() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -9069,7 +9351,7 @@ fn test_keysend_payments_to_public_node() {
                         nodes[0].logger).unwrap();
 
        let test_preimage = PaymentPreimage([42; 32]);
-       let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
+       let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
        check_added_monitors!(nodes[0], 1);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -9099,7 +9381,7 @@ fn test_keysend_payments_to_private_node() {
                                 nodes[0].logger).unwrap();
 
        let test_preimage = PaymentPreimage([42; 32]);
-       let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
+       let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
        check_added_monitors!(nodes[0], 1);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -9108,116 +9390,3 @@ fn test_keysend_payments_to_private_node() {
        pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
        claim_payment(&nodes[0], &path, test_preimage);
 }
-
-fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, at_forward: bool, on_holder_tx: bool) {
-       // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat` policy.
-       //
-       // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
-       // trimmed-to-dust HTLC outbound balance and this new payment as included on next counterparty
-       // commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the update.
-       // At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC inbound
-       // and trimmed-to-dust HTLC outbound balance and this new received HTLC as included on next
-       // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail the update.
-       // Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel might be
-       // available again for HTLC processing once the dust bandwidth has cleared up.
-
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let mut config = test_default_channel_config();
-       config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
-       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
-       let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       open_channel.max_htlc_value_in_flight_msat = 50_000_000;
-       open_channel.max_accepted_htlcs = 60;
-       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
-       let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-       if on_holder_tx {
-               accept_channel.dust_limit_satoshis = 660;
-       }
-       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
-
-       let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 1_000_000, 42);
-
-       if on_holder_tx {
-               if let Some(mut chan) = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) {
-                       chan.holder_dust_limit_satoshis = 660;
-               }
-       }
-
-       nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
-       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
-       check_added_monitors!(nodes[1], 1);
-
-       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
-       check_added_monitors!(nodes[0], 1);
-
-       let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
-       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
-       update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
-
-       if on_holder_tx {
-               if dust_outbound_balance {
-                       for i in 0..2 {
-                               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 2_300_000);
-                               if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
-                       }
-               } else {
-                       for _ in 0..2 {
-                               route_payment(&nodes[0], &[&nodes[1]], 2_300_000);
-                       }
-               }
-       } else {
-               if dust_outbound_balance {
-                       for i in 0..25 {
-                               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 200_000); // + 177_000 msat of HTLC-success tx at 253 sats/kWU
-                               if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
-                       }
-               } else {
-                       for _ in 0..25 {
-                               route_payment(&nodes[0], &[&nodes[1]], 200_000); // + 167_000 msat of HTLC-timeout tx at 253 sats/kWU
-                       }
-               }
-       }
-
-       if at_forward {
-               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { 2_300_000 } else { 200_000 });
-               let mut config = UserConfig::default();
-               if on_holder_tx {
-                       unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat)));
-               } else {
-                       unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat)));
-               }
-       } else {
-               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1 ], if on_holder_tx { 2_300_000 } else { 200_000 });
-               nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
-               check_added_monitors!(nodes[0], 1);
-               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-               assert_eq!(events.len(), 1);
-               let payment_event = SendEvent::from_event(events.remove(0));
-               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
-               if on_holder_tx {
-                       nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
-               } else {
-                       nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
-               }
-       }
-
-       let _ = nodes[1].node.get_and_clear_pending_msg_events();
-       let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-       added_monitors.clear();
-}
-
-#[test]
-fn test_max_dust_htlc_exposure() {
-       do_test_max_dust_htlc_exposure(true, true, true);
-       do_test_max_dust_htlc_exposure(false, true, true);
-       do_test_max_dust_htlc_exposure(false, false, true);
-       do_test_max_dust_htlc_exposure(false, false, false);
-       do_test_max_dust_htlc_exposure(true, true, false);
-       do_test_max_dust_htlc_exposure(true, false, false);
-       do_test_max_dust_htlc_exposure(true, false, true);
-       do_test_max_dust_htlc_exposure(false, true, false);
-}
index b5e433270a51ac9d6a363c9152f4f485fbbe39b2..a2a0b4efee2f32418fcdf503a1c5c4b0db0daccc 100644 (file)
@@ -51,6 +51,9 @@ pub mod wire;
 mod functional_tests;
 #[cfg(test)]
 #[allow(unused_mut)]
+mod payment_tests;
+#[cfg(test)]
+#[allow(unused_mut)]
 mod chanmon_update_fail_tests;
 #[cfg(test)]
 #[allow(unused_mut)]
index c621f4eba22327b36602526c70d234f5b6426b72..08cd2306de865970d70afa8bc72b85b4d0909ac6 100644 (file)
@@ -15,7 +15,7 @@ use ln::{channel, PaymentPreimage, PaymentHash};
 use ln::channelmanager::BREAKDOWN_TIMEOUT;
 use ln::features::InitFeatures;
 use ln::msgs::{ChannelMessageHandler, ErrorAction};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use routing::network_graph::NetworkUpdate;
 use routing::router::get_route;
 
@@ -74,6 +74,7 @@ fn chanmon_fail_from_stale_commitment() {
        mine_transaction(&nodes[1], &bs_txn[0]);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
@@ -175,6 +176,8 @@ fn chanmon_claim_value_coop_close() {
                        Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
                check_spends!(spend_tx, shutdown_tx[0]);
        }
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 fn sorted_vec<T: Ord>(mut v: Vec<T>) -> Vec<T> {
@@ -314,9 +317,11 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        assert!(nodes[0].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        assert!(nodes[1].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
index 70d5a0c0bdfe30362668824ed6e77e0e267755a0..0e2d081c83bf862c7a3a80165c9f78b8e87ba1ec 100644 (file)
@@ -163,7 +163,7 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case:
 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
-       if let &Event::PaymentFailed { payment_hash:_, ref rejected_by_dest, ref network_update, ref error_code, error_data: _, ref all_paths_failed } = &events[0] {
+       if let &Event::PaymentPathFailed { payment_hash:_, ref rejected_by_dest, ref network_update, ref error_code, error_data: _, ref all_paths_failed, path: _ } = &events[0] {
                assert_eq!(*rejected_by_dest, !expected_retryable);
                assert_eq!(*all_paths_failed, true);
                assert_eq!(*error_code, expected_error_code);
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
new file mode 100644 (file)
index 0000000..f8ff35f
--- /dev/null
@@ -0,0 +1,254 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Tests that test the payment retry logic in ChannelManager, including various edge-cases around
+//! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
+//! payments thereafter.
+
+use ln::{PaymentPreimage, PaymentHash};
+use ln::channelmanager::{PaymentId, PaymentSendFailure};
+use routing::router::get_route;
+use ln::features::{InitFeatures, InvoiceFeatures};
+use ln::msgs;
+use ln::msgs::ChannelMessageHandler;
+use util::test_utils;
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::errors::APIError;
+
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::hashes::Hash;
+
+use prelude::*;
+
+use ln::functional_test_utils::*;
+
+#[test]
+fn retry_single_path_payment() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
+       // Rebalance to find a route
+       send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
+
+       let logger = test_utils::TestLogger::new();
+       let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]);
+       let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+       let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+
+       // Rebalance so that the first hop fails.
+       send_payment(&nodes[1], &vec!(&nodes[2])[..], 2_000_000);
+
+       // Make sure the payment fails on the first hop.
+       let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let mut payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable!(&nodes[1]);
+       let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       assert!(htlc_updates.update_add_htlcs.is_empty());
+       assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
+       assert!(htlc_updates.update_fulfill_htlcs.is_empty());
+       assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
+       check_added_monitors!(nodes[1], 1);
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
+       expect_payment_failed!(nodes[0], payment_hash, false);
+
+       // Rebalance the channel so the retry succeeds.
+       send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
+
+       // Mine two blocks (we expire retries after 3, so this will check that we don't expire early)
+       connect_blocks(&nodes[0], 2);
+
+       // Retry the payment and make sure it succeeds.
+       nodes[0].node.retry_payment(&route, payment_id).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 100_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
+       claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
+}
+
+#[test]
+fn mpp_retry() {
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let chan_4_id = create_announced_chan_between_nodes(&nodes, 3, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let logger = test_utils::TestLogger::new();
+       // Rebalance
+       send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
+
+       let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
+       let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+       let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1_000_000, TEST_FINAL_CLTV, &logger).unwrap();
+       let path = route.paths[0].clone();
+       route.paths.push(path);
+       route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+       route.paths[0][0].short_channel_id = chan_1_id;
+       route.paths[0][1].short_channel_id = chan_3_id;
+       route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+       route.paths[1][0].short_channel_id = chan_2_id;
+       route.paths[1][1].short_channel_id = chan_4_id;
+
+       // Initiate the MPP payment.
+       let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 2); // one monitor per path
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+
+       // Pass half of the payment along the success path.
+       let success_path_msgs = events.remove(0);
+       pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None);
+
+       // Add the HTLC along the first hop.
+       let fail_path_msgs_1 = events.remove(0);
+       let (update_add, commitment_signed) = match fail_path_msgs_1 {
+               MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+                       assert_eq!(update_add_htlcs.len(), 1);
+                       assert!(update_fail_htlcs.is_empty());
+                       assert!(update_fulfill_htlcs.is_empty());
+                       assert!(update_fail_malformed_htlcs.is_empty());
+                       assert!(update_fee.is_none());
+                       (update_add_htlcs[0].clone(), commitment_signed.clone())
+               },
+               _ => panic!("Unexpected event"),
+       };
+       nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
+       commitment_signed_dance!(nodes[2], nodes[0], commitment_signed, false);
+
+       // Attempt to forward the payment and complete the 2nd path's failure.
+       expect_pending_htlcs_forwardable!(&nodes[2]);
+       expect_pending_htlcs_forwardable!(&nodes[2]);
+       let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+       assert!(htlc_updates.update_add_htlcs.is_empty());
+       assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
+       assert!(htlc_updates.update_fulfill_htlcs.is_empty());
+       assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
+       check_added_monitors!(nodes[2], 1);
+       nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
+       expect_payment_failed!(nodes[0], payment_hash, false);
+
+       // Rebalance the channel so the second half of the payment can succeed.
+       send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
+
+       // Make sure it errors as expected given a too-large amount.
+       if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, payment_id) {
+               assert!(err.contains("over total_payment_amt_msat"));
+       } else { panic!("Unexpected error"); }
+
+       // Make sure it errors as expected given the wrong payment_id.
+       if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, PaymentId([0; 32])) {
+               assert!(err.contains("not found"));
+       } else { panic!("Unexpected error"); }
+
+       // Retry the second half of the payment and make sure it succeeds.
+       let mut path = route.clone();
+       path.paths.remove(0);
+       nodes[0].node.retry_payment(&path, payment_id).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
+       claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+}
+
+#[test]
+fn retry_expired_payment() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
+       // Rebalance to find a route
+       send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
+
+       let logger = test_utils::TestLogger::new();
+       let (_payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]);
+       let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+       let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+
+       // Rebalance so that the first hop fails.
+       send_payment(&nodes[1], &vec!(&nodes[2])[..], 2_000_000);
+
+       // Make sure the payment fails on the first hop.
+       let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let mut payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable!(&nodes[1]);
+       let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       assert!(htlc_updates.update_add_htlcs.is_empty());
+       assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
+       assert!(htlc_updates.update_fulfill_htlcs.is_empty());
+       assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
+       check_added_monitors!(nodes[1], 1);
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
+       expect_payment_failed!(nodes[0], payment_hash, false);
+
+       // Mine blocks so the payment will have expired.
+       connect_blocks(&nodes[0], 3);
+
+       // Retry the payment and make sure it errors as expected.
+       if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, payment_id) {
+               assert!(err.contains("not found"));
+       } else {
+               panic!("Unexpected error");
+       }
+}
+
+#[test]
+fn no_pending_leak_on_initial_send_failure() {
+       // In an earlier version of our payment tracking, we'd have a retry entry even when the initial
+       // HTLC for payment failed to send due to local channel errors (e.g. peer disconnected). In this
+       // case, the user wouldn't have a PaymentId to retry the payment with, but we'd think we have a
+       // pending payment forever and never time it out.
+       // Here we test exactly that - retrying a payment when a peer was disconnected on the first
+       // try, and then check that no pending payment is being tracked.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+       let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+       unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)),
+               true, APIError::ChannelUnavailable { ref err },
+               assert_eq!(err, "Peer for first hop currently disconnected/pending monitor update!"));
+
+       assert!(!nodes[0].node.has_pending_payments());
+}
index c9bc40c945aa5df7263aec01667a55d17eddad0d..1815d4a350cc0289100d207b9fa2a98029397b1d 100644 (file)
@@ -32,11 +32,11 @@ use routing::network_graph::NetGraphMsgHandler;
 use prelude::*;
 use io;
 use alloc::collections::LinkedList;
-use alloc::fmt::Debug;
 use sync::{Arc, Mutex};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
+use core::convert::Infallible;
 #[cfg(feature = "std")] use std::error;
 
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -80,28 +80,28 @@ impl Deref for IgnoringMessageHandler {
        fn deref(&self) -> &Self { self }
 }
 
-impl wire::Type for () {
+// Implement Type for Infallible, note that it cannot be constructed, and thus you can never call a
+// method that takes self for it.
+impl wire::Type for Infallible {
        fn type_id(&self) -> u16 {
-               // We should never call this for `DummyCustomType`
                unreachable!();
        }
 }
-
-impl Writeable for () {
+impl Writeable for Infallible {
        fn write<W: Writer>(&self, _: &mut W) -> Result<(), io::Error> {
                unreachable!();
        }
 }
 
 impl wire::CustomMessageReader for IgnoringMessageHandler {
-       type CustomMessage = ();
+       type CustomMessage = Infallible;
        fn read<R: io::Read>(&self, _message_type: u16, _buffer: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError> {
                Ok(None)
        }
 }
 
 impl CustomMessageHandler for IgnoringMessageHandler {
-       fn handle_custom_message(&self, _msg: Self::CustomMessage, _sender_node_id: &PublicKey) -> Result<(), LightningError> {
+       fn handle_custom_message(&self, _msg: Infallible, _sender_node_id: &PublicKey) -> Result<(), LightningError> {
                // Since we always return `None` in the read the handle method should never be called.
                unreachable!();
        }
@@ -469,7 +469,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                CM::Target: ChannelMessageHandler,
                RM::Target: RoutingMessageHandler,
                L::Target: Logger,
-               CMH::Target: CustomMessageHandler + wire::CustomMessageReader {
+               CMH::Target: CustomMessageHandler {
        /// Constructs a new PeerManager with the given message handlers and node_id secret key
        /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
        /// cryptographically secure random bytes.
@@ -719,7 +719,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        }
 
        /// Append a message to a peer's pending outbound/write buffer, and update the map of peers needing sends accordingly.
-       fn enqueue_message<M: wire::Type + Writeable + Debug>(&self, peer: &mut Peer, message: &M) {
+       fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
                let mut buffer = VecWriter(Vec::with_capacity(2048));
                wire::write(message, &mut buffer).unwrap(); // crash if the write failed
                let encoded_message = buffer.0;
@@ -1169,6 +1169,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// May call [`send_data`] on [`SocketDescriptor`]s. Thus, be very careful with reentrancy
        /// issues!
        ///
+       /// You don't have to call this function explicitly if you are using [`lightning-net-tokio`]
+       /// or one of the other clients provided in our language bindings.
+       ///
        /// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
        /// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards
        /// [`send_data`]: SocketDescriptor::send_data
index 494561733336be5a44cc76fed49393a873bc56b3..0409396db11696310cb4ad1a61b7d83870121970 100644 (file)
@@ -18,7 +18,7 @@ use ln::features::InitFeatures;
 use ln::msgs::{ChannelMessageHandler, ErrorAction};
 use routing::network_graph::NetworkUpdate;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use util::test_utils;
 use util::ser::{ReadableArgs, Writeable};
 
@@ -84,6 +84,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() });
                check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate)
+               check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
                let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManger: 1 local commitment tx, 1 Received HTLC-Claim
                assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
@@ -125,6 +126,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        };
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate)
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        // Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1.
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
        check_added_monitors!(nodes[1], 0);
@@ -301,6 +303,12 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
        *nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
        nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.".to_string() });
+       } else {
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.".to_string() });
+       }
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
@@ -372,6 +380,7 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node A commitment transaction
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_closed_broadcast!(nodes[0], true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[0], 1);
        // Verify node A broadcast tx claiming both HTLCs
        {
@@ -389,6 +398,7 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node B
        connect_blocks(&nodes[1], 135);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[1], 1);
        // Verify node B broadcast 2 HTLC-timeout txn
        let partial_claim_tx = {
@@ -463,9 +473,11 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
        assert!(nodes[0].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        assert!(nodes[1].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Drop transactions broadcasted in response to the first commitment transaction (we have good
        // test coverage of these things already elsewhere).
index 4e81d76ad670d18b2c8b172d4acd94ea62c64607..00ad48bb1e31c07cf695f52282ea2b746cf05455 100644 (file)
@@ -3,7 +3,7 @@
 use bitcoin::blockdata::opcodes::all::OP_PUSHBYTES_0 as SEGWIT_V0;
 use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::hashes::Hash;
-use bitcoin::hash_types::{PubkeyHash, ScriptHash, WPubkeyHash, WScriptHash};
+use bitcoin::hash_types::{WPubkeyHash, WScriptHash};
 use bitcoin::secp256k1::key::PublicKey;
 
 use ln::features::InitFeatures;
@@ -66,16 +66,6 @@ impl ShutdownScript {
                Self(ShutdownScriptImpl::Legacy(pubkey))
        }
 
-       /// Generates a P2PKH script pubkey from the given [`PubkeyHash`].
-       pub fn new_p2pkh(pubkey_hash: &PubkeyHash) -> Self {
-               Self(ShutdownScriptImpl::Bolt2(Script::new_p2pkh(pubkey_hash)))
-       }
-
-       /// Generates a P2SH script pubkey from the given [`ScriptHash`].
-       pub fn new_p2sh(script_hash: &ScriptHash) -> Self {
-               Self(ShutdownScriptImpl::Bolt2(Script::new_p2sh(script_hash)))
-       }
-
        /// Generates a P2WPKH script pubkey from the given [`WPubkeyHash`].
        pub fn new_p2wpkh(pubkey_hash: &WPubkeyHash) -> Self {
                Self(ShutdownScriptImpl::Bolt2(Script::new_v0_wpkh(pubkey_hash)))
@@ -126,7 +116,9 @@ impl ShutdownScript {
        }
 }
 
-fn is_bolt2_compliant(script: &Script, features: &InitFeatures) -> bool {
+/// Check if a given script is compliant with BOLT 2's shutdown script requirements for the given
+/// counterparty features.
+pub(crate) fn is_bolt2_compliant(script: &Script, features: &InitFeatures) -> bool {
        if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wpkh() || script.is_v0_p2wsh() {
                true
        } else if features.supports_shutdown_anysegwit() {
@@ -136,6 +128,8 @@ fn is_bolt2_compliant(script: &Script, features: &InitFeatures) -> bool {
        }
 }
 
+// Note that this is only for our own shutdown scripts. Counterparties are still allowed to send us
+// non-witness shutdown scripts which this rejects.
 impl TryFrom<Script> for ShutdownScript {
        type Error = InvalidShutdownScript;
 
@@ -144,11 +138,13 @@ impl TryFrom<Script> for ShutdownScript {
        }
 }
 
+// Note that this is only for our own shutdown scripts. Counterparties are still allowed to send us
+// non-witness shutdown scripts which this rejects.
 impl TryFrom<(Script, &InitFeatures)> for ShutdownScript {
        type Error = InvalidShutdownScript;
 
        fn try_from((script, features): (Script, &InitFeatures)) -> Result<Self, Self::Error> {
-               if is_bolt2_compliant(&script, features) {
+               if is_bolt2_compliant(&script, features) && script.is_witness_program() {
                        Ok(Self(ShutdownScriptImpl::Bolt2(script)))
                } else {
                        Err(InvalidShutdownScript { script })
@@ -216,30 +212,6 @@ mod shutdown_script_tests {
                assert_eq!(shutdown_script.into_inner(), p2wpkh_script);
        }
 
-       #[test]
-       fn generates_p2pkh_from_pubkey_hash() {
-               let pubkey_hash = pubkey().pubkey_hash();
-               let p2pkh_script = Script::new_p2pkh(&pubkey_hash);
-
-               let shutdown_script = ShutdownScript::new_p2pkh(&pubkey_hash);
-               assert!(shutdown_script.is_compatible(&InitFeatures::known()));
-               assert!(shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
-               assert_eq!(shutdown_script.into_inner(), p2pkh_script);
-               assert!(ShutdownScript::try_from(p2pkh_script).is_ok());
-       }
-
-       #[test]
-       fn generates_p2sh_from_script_hash() {
-               let script_hash = redeem_script().script_hash();
-               let p2sh_script = Script::new_p2sh(&script_hash);
-
-               let shutdown_script = ShutdownScript::new_p2sh(&script_hash);
-               assert!(shutdown_script.is_compatible(&InitFeatures::known()));
-               assert!(shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
-               assert_eq!(shutdown_script.into_inner(), p2sh_script);
-               assert!(ShutdownScript::try_from(p2sh_script).is_ok());
-       }
-
        #[test]
        fn generates_p2wpkh_from_pubkey_hash() {
                let pubkey_hash = pubkey().wpubkey_hash().unwrap();
index 5da01d3a095862df72fa82984f59cce681b8eddd..c2908e2652ed141cc70d0838e3d976c8709feab8 100644 (file)
@@ -21,7 +21,7 @@ use ln::msgs::{ChannelMessageHandler, ErrorAction};
 use ln::script::ShutdownScript;
 use util::test_utils;
 use util::test_utils::OnGetShutdownScriptpubkey;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use util::errors::APIError;
 use util::config::UserConfig;
 
@@ -67,6 +67,8 @@ fn pre_funding_lock_shutdown_test() {
 
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[1].node.list_channels().is_empty());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -140,6 +142,8 @@ fn updates_shutdown_wait() {
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
        let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        assert!(node_1_none.is_none());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 
        assert!(nodes[0].node.list_channels().is_empty());
 
@@ -148,6 +152,8 @@ fn updates_shutdown_wait() {
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -222,6 +228,9 @@ fn htlc_fail_async_shutdown() {
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 2, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
 }
 
 fn do_test_shutdown_rebroadcast(recv_count: u8) {
@@ -361,6 +370,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
                let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
                assert!(node_1_none.is_none());
+               check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        } else {
                // If one node, however, received + responded with an identical closing_signed we end
                // up erroring and node[0] will try to broadcast its own latest commitment transaction.
@@ -389,6 +399,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                // closing_signed so we do it ourselves
                check_closed_broadcast!(nodes[1], false);
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
        }
 
        assert!(nodes[0].node.list_channels().is_empty());
@@ -398,6 +409,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -430,7 +444,8 @@ fn test_upfront_shutdown_script() {
        node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
        // Test we enforce upfront_scriptpbukey if by providing a diffrent one at closing that  we disconnect peer
        nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
-    assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
+       assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
+       check_closed_event!(nodes[2], 1, ClosureReason::ProcessingError { err: "Got shutdown request with a scriptpubkey (a91441c98a140039816273e50db317422c11c2bfcc8887) which did not match their previous scriptpubkey.".to_string() });
        check_added_monitors!(nodes[2], 1);
 
        // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
@@ -540,6 +555,7 @@ fn test_unsupported_anysegwit_upfront_shutdown_script() {
                },
                _ => panic!("Unexpected event"),
        }
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)".to_string() });
 }
 
 #[test]
@@ -687,6 +703,7 @@ fn test_unsupported_anysegwit_shutdown_script() {
                _ => panic!("Unexpected event"),
        }
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Got a nonstandard scriptpubkey (60020028) from remote peer".to_string() });
 }
 
 #[test]
@@ -722,6 +739,7 @@ fn test_invalid_shutdown_script() {
                _ => panic!("Unexpected event"),
        }
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Got a nonstandard scriptpubkey (00020000) from remote peer".to_string() });
 }
 
 #[derive(PartialEq)]
@@ -787,7 +805,9 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
                let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
                if timeout_step == TimeoutStep::NoTimeout {
                        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.1.unwrap());
+                       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
                }
+               check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        }
 
        if timeout_step != TimeoutStep::NoTimeout {
@@ -810,6 +830,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
                         txn[0].output[0].script_pubkey.is_v0_p2wsh()));
                check_closed_broadcast!(nodes[1], true);
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string() });
        } else {
                assert!(txn[0].output[0].script_pubkey.is_v0_p2wpkh());
                assert!(txn[0].output[1].script_pubkey.is_v0_p2wpkh());
@@ -869,6 +890,8 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) {
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -922,4 +945,6 @@ fn simple_target_feerate_shutdown() {
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
index 4caf3543b0f9f02e5ec3b650c8c9a82fa9cc861c..d3cc257f5a6b18cb1776475b4fec69949521e1d6 100644 (file)
@@ -20,7 +20,7 @@ use util::ser::{Readable, Writeable, Writer};
 /// decoders.
 pub trait CustomMessageReader {
        /// The type of the message decoded by the implementation.
-       type CustomMessage: core::fmt::Debug + Type + Writeable;
+       type CustomMessage: Type;
        /// Decodes a custom message to `Self::CustomMessage`. If the given message type is known to the
        /// implementation and the message could be decoded, must return `Ok(Some(message))`. If the
        /// message type is unknown to the implementation, must return `Ok(None)`. If a decoding error
@@ -245,12 +245,12 @@ pub(crate) use self::encode::Encode;
 /// Defines a type identifier for sending messages over the wire.
 ///
 /// Messages implementing this trait specify a type and must be [`Writeable`].
-pub trait Type {
+pub trait Type: core::fmt::Debug + Writeable {
        /// Returns the type identifying the message payload.
        fn type_id(&self) -> u16;
 }
 
-impl<T> Type for T where T: Encode {
+impl<T: core::fmt::Debug + Writeable> Type for T where T: Encode {
        fn type_id(&self) -> u16 {
                T::TYPE
        }
@@ -457,6 +457,10 @@ mod tests {
                }
        }
 
+       impl Type for () {
+               fn type_id(&self) -> u16 { unreachable!(); }
+       }
+
        #[test]
        fn is_even_message_type() {
                let message = Message::<()>::Unknown(42);
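With `Debug` and `Writeable` now supertraits of `Type`, a custom message only needs the shape below. A minimal sketch; the `Ping2` name, the type number, and the import paths are illustrative assumptions, not part of this diff:

```rust
use std::io;
use lightning::ln::wire::Type;
use lightning::util::ser::{Writeable, Writer};

// Hypothetical custom message; odd type numbers are safe for experiments
// under BOLT 1's "it's OK to be odd" rule.
#[derive(Debug)]
struct Ping2 {
	nonce: u64,
}

impl Writeable for Ping2 {
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		self.nonce.write(w)
	}
}

impl Type for Ping2 {
	fn type_id(&self) -> u16 { 32769 }
}
```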
index 16ff80189e96b7dd249d8d96b5112b305e2f272b..bdb305e7870a2b9a30e9236c34b516dc2328c873 100644 (file)
@@ -58,6 +58,18 @@ pub struct NetworkGraph {
        nodes: RwLock<BTreeMap<PublicKey, NodeInfo>>,
 }
 
+impl Clone for NetworkGraph {
+       fn clone(&self) -> Self {
+               let channels = self.channels.read().unwrap();
+               let nodes = self.nodes.read().unwrap();
+               Self {
+                       genesis_hash: self.genesis_hash.clone(),
+                       channels: RwLock::new(channels.clone()),
+                       nodes: RwLock::new(nodes.clone()),
+               }
+       }
+}
+
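Since `RwLock<T>` does not implement `Clone`, a derive is unavailable; the manual impl above holds both read locks so the copied channel and node maps form a single consistent snapshot. A usage sketch, assuming a populated graph:

```rust
// Snapshot the live graph (e.g. for offline route analysis) while other
// threads keep applying gossip updates to the original.
let snapshot: NetworkGraph = network_graph.clone();
assert_eq!(snapshot.read_only().channels().len(),
           network_graph.read_only().channels().len());
```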
 /// A read-only view of [`NetworkGraph`].
 pub struct ReadOnlyNetworkGraph<'a> {
        channels: RwLockReadGuard<'a, BTreeMap<u64, ChannelInfo>>,
@@ -113,7 +125,7 @@ impl_writeable_tlv_based_enum_upgradable!(NetworkUpdate,
 impl<C: Deref, L: Deref> EventHandler for NetGraphMsgHandler<C, L>
 where C::Target: chain::Access, L::Target: Logger {
        fn handle_event(&self, event: &Event) {
-               if let Event::PaymentFailed { payment_hash: _, rejected_by_dest: _, network_update, .. } = event {
+               if let Event::PaymentPathFailed { payment_hash: _, rejected_by_dest: _, network_update, .. } = event {
                        if let Some(network_update) = network_update {
                                self.handle_network_update(network_update);
                        }
@@ -127,7 +139,7 @@ where C::Target: chain::Access, L::Target: Logger {
 /// Provides interface to help with initial routing sync by
 /// serving historical announcements.
 ///
-/// Serves as an [`EventHandler`] for applying updates from [`Event::PaymentFailed`] to the
+/// Serves as an [`EventHandler`] for applying updates from [`Event::PaymentPathFailed`] to the
 /// [`NetworkGraph`].
 pub struct NetGraphMsgHandler<C: Deref, L: Deref>
 where C::Target: chain::Access, L::Target: Logger
@@ -1725,10 +1737,11 @@ mod tests {
 
                        assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none());
 
-                       net_graph_msg_handler.handle_event(&Event::PaymentFailed {
+                       net_graph_msg_handler.handle_event(&Event::PaymentPathFailed {
                                payment_hash: PaymentHash([0; 32]),
                                rejected_by_dest: false,
                                all_paths_failed: true,
+                               path: vec![],
                                network_update: Some(NetworkUpdate::ChannelUpdateMessage {
                                        msg: valid_channel_update,
                                }),
@@ -1748,10 +1761,11 @@ mod tests {
                                }
                        };
 
-                       net_graph_msg_handler.handle_event(&Event::PaymentFailed {
+                       net_graph_msg_handler.handle_event(&Event::PaymentPathFailed {
                                payment_hash: PaymentHash([0; 32]),
                                rejected_by_dest: false,
                                all_paths_failed: true,
+                               path: vec![],
                                network_update: Some(NetworkUpdate::ChannelClosed {
                                        short_channel_id,
                                        is_permanent: false,
@@ -1770,10 +1784,11 @@ mod tests {
 
                // Permanent closing deletes a channel
                {
-                       net_graph_msg_handler.handle_event(&Event::PaymentFailed {
+                       net_graph_msg_handler.handle_event(&Event::PaymentPathFailed {
                                payment_hash: PaymentHash([0; 32]),
                                rejected_by_dest: false,
                                all_paths_failed: true,
+                               path: vec![],
                                network_update: Some(NetworkUpdate::ChannelClosed {
                                        short_channel_id,
                                        is_permanent: true,
index 97c2c7623ea77c2642ab2ec0874ea47ab9d7073d..57e604993cc3ca07a13f031548631478149cf9e2 100644 (file)
@@ -28,7 +28,7 @@ use core::cmp;
 use core::ops::Deref;
 
 /// A hop in a route
-#[derive(Clone, Hash, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub struct RouteHop {
        /// The node_id of the node at this hop.
        pub pubkey: PublicKey,
@@ -71,6 +71,27 @@ pub struct Route {
        pub paths: Vec<Vec<RouteHop>>,
 }
 
+impl Route {
+       /// Returns the total amount of fees paid on this [`Route`].
+       ///
+       /// This doesn't include any extra payment made to the recipient, which can happen in excess of
+       /// the amount passed to [`get_route`]'s `final_value_msat`.
+       pub fn get_total_fees(&self) -> u64 {
+               // Do not count last hop of each path since that's the full value of the payment
+               return self.paths.iter()
+                       .flat_map(|path| path.split_last().map(|(_, path_prefix)| path_prefix).unwrap_or(&[]))
+                       .map(|hop| &hop.fee_msat)
+                       .sum();
+       }
+
+       /// Returns the total amount paid on this [`Route`], excluding the fees.
+       pub fn get_total_amount(&self) -> u64 {
+               return self.paths.iter()
+                       .map(|path| path.split_last().map(|(hop, _)| hop.fee_msat).unwrap_or(0))
+                       .sum();
+       }
+}
+
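The `split_last` calls encode the convention that the final `fee_msat` in each path is the amount delivered to the recipient rather than a fee. A degenerate one-hop sketch (`payee_pubkey` is assumed to be in scope):

```rust
// With a single hop, the only hop is the final one: no routing fees are
// paid and its fee_msat is the full delivered amount.
let route = Route { paths: vec![vec![RouteHop {
	pubkey: payee_pubkey,
	node_features: NodeFeatures::empty(), channel_features: ChannelFeatures::empty(),
	short_channel_id: 42, fee_msat: 250_000, cltv_expiry_delta: 0,
}]] };
assert_eq!(route.get_total_fees(), 0);
assert_eq!(route.get_total_amount(), 250_000);
```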
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
@@ -466,12 +487,14 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                        node_info.features.supports_basic_mpp()
                } else { false }
        } else { false };
+       log_trace!(logger, "Searching for a route from payer {} to payee {} {} MPP", our_node_id, payee,
+               if allow_mpp { "with" } else { "without" });
 
        // Step (1).
        // Prepare the data we'll use for payee-to-payer search by
        // inserting first hops suggested by the caller as targets.
        // Our search will then attempt to reach them while traversing from the payee node.
-       let mut first_hop_targets: HashMap<_, (_, ChannelFeatures, _, NodeFeatures)> =
+       let mut first_hop_targets: HashMap<_, Vec<(_, ChannelFeatures, _, NodeFeatures)>> =
                HashMap::with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 });
        if let Some(hops) = first_hops {
                for chan in hops {
@@ -479,7 +502,8 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                        if chan.counterparty.node_id == *our_node_id {
                                return Err(LightningError{err: "First hop cannot have our_node_id as a destination.".to_owned(), action: ErrorAction::IgnoreError});
                        }
-                       first_hop_targets.insert(chan.counterparty.node_id, (short_channel_id, chan.counterparty.features.to_context(), chan.outbound_capacity_msat, chan.counterparty.features.to_context()));
+                       first_hop_targets.entry(chan.counterparty.node_id).or_insert(Vec::new())
+                               .push((short_channel_id, chan.counterparty.features.to_context(), chan.outbound_capacity_msat, chan.counterparty.features.to_context()));
                }
                if first_hop_targets.is_empty() {
                        return Err(LightningError{err: "Cannot route when there are no outbound routes away from us".to_owned(), action: ErrorAction::IgnoreError});
@@ -803,8 +827,8 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                        };
 
                        if !skip_node {
-                               if first_hops.is_some() {
-                                       if let Some(&(ref first_hop, ref features, ref outbound_capacity_msat, _)) = first_hop_targets.get(&$node_id) {
+                               if let Some(first_channels) = first_hop_targets.get(&$node_id) {
+                                       for (ref first_hop, ref features, ref outbound_capacity_msat, _) in first_channels {
                                                add_entry!(first_hop, *our_node_id, $node_id, dummy_directional_info, Some(outbound_capacity_msat / 1000), features, $fee_to_target_msat, $next_hops_value_contribution, $next_hops_path_htlc_minimum_msat);
                                        }
                                }
@@ -857,9 +881,10 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
 
                // If the first hop is a private channel and the only way to reach the payee, this is the only
                // place where it could be added.
-               if first_hops.is_some() {
-                       if let Some(&(ref first_hop, ref features, ref outbound_capacity_msat, _)) = first_hop_targets.get(&payee) {
-                               add_entry!(first_hop, *our_node_id, payee, dummy_directional_info, Some(outbound_capacity_msat / 1000), features, 0, path_value_msat, 0);
+               if let Some(first_channels) = first_hop_targets.get(&payee) {
+                       for (ref first_hop, ref features, ref outbound_capacity_msat, _) in first_channels {
+                               let added = add_entry!(first_hop, *our_node_id, payee, dummy_directional_info, Some(outbound_capacity_msat / 1000), features, 0, path_value_msat, 0);
+                               log_trace!(logger, "{} direct route to payee via SCID {}", if added { "Added" } else { "Skipped" }, first_hop);
                        }
                }
 
@@ -876,7 +901,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                        },
                }
 
-               // Step (1).
+               // Step (2).
                // If a caller provided us with last hops, add them to routing targets. Since this happens
                // earlier than general path finding, they will be somewhat prioritized, although currently
                // it matters only if the fees are exactly the same.
@@ -928,8 +953,10 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                                        }
 
                                        // Searching for a direct channel between last checked hop and first_hop_targets
-                                       if let Some(&(ref first_hop, ref features, ref outbound_capacity_msat, _)) = first_hop_targets.get(&prev_hop_id) {
-                                               add_entry!(first_hop, *our_node_id , prev_hop_id, dummy_directional_info, Some(outbound_capacity_msat / 1000), features, aggregate_next_hops_fee_msat, path_value_msat, aggregate_next_hops_path_htlc_minimum_msat);
+                                       if let Some(first_channels) = first_hop_targets.get(&prev_hop_id) {
+                                               for (ref first_hop, ref features, ref outbound_capacity_msat, _) in first_channels {
+                                                       add_entry!(first_hop, *our_node_id , prev_hop_id, dummy_directional_info, Some(outbound_capacity_msat / 1000), features, aggregate_next_hops_fee_msat, path_value_msat, aggregate_next_hops_path_htlc_minimum_msat);
+                                               }
                                        }
 
                                        if !hop_used {
@@ -960,8 +987,10 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                                                // Note that we *must* check if the last hop was added as `add_entry`
                                                // always assumes that the third argument is a node to which we have a
                                                // path.
-                                               if let Some(&(ref first_hop, ref features, ref outbound_capacity_msat, _)) = first_hop_targets.get(&hop.src_node_id) {
-                                                       add_entry!(first_hop, *our_node_id , hop.src_node_id, dummy_directional_info, Some(outbound_capacity_msat / 1000), features, aggregate_next_hops_fee_msat, path_value_msat, aggregate_next_hops_path_htlc_minimum_msat);
+                                               if let Some(first_channels) = first_hop_targets.get(&hop.src_node_id) {
+                                                       for (ref first_hop, ref features, ref outbound_capacity_msat, _) in first_channels {
+                                                               add_entry!(first_hop, *our_node_id , hop.src_node_id, dummy_directional_info, Some(outbound_capacity_msat / 1000), features, aggregate_next_hops_fee_msat, path_value_msat, aggregate_next_hops_path_htlc_minimum_msat);
+                                                       }
                                                }
                                        }
                                }
@@ -974,7 +1003,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                // last hops communicated by the caller, and the payment receiver.
                let mut found_new_path = false;
 
-               // Step (2).
+               // Step (3).
                // If this loop terminates due to the exhaustion of targets, two situations are possible:
                // - not enough outgoing liquidity:
                //   0 < already_collected_value_msat < final_value_msat
@@ -992,20 +1021,30 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                                let mut ordered_hops = vec!((new_entry.clone(), NodeFeatures::empty()));
 
                                'path_walk: loop {
-                                       if let Some(&(_, _, _, ref features)) = first_hop_targets.get(&ordered_hops.last().unwrap().0.pubkey) {
-                                               ordered_hops.last_mut().unwrap().1 = features.clone();
-                                       } else if let Some(node) = network_nodes.get(&ordered_hops.last().unwrap().0.pubkey) {
-                                               if let Some(node_info) = node.announcement_info.as_ref() {
-                                                       ordered_hops.last_mut().unwrap().1 = node_info.features.clone();
+                                       let mut features_set = false;
+                                       if let Some(first_channels) = first_hop_targets.get(&ordered_hops.last().unwrap().0.pubkey) {
+                                               for (scid, _, _, ref features) in first_channels {
+                                                       if *scid == ordered_hops.last().unwrap().0.short_channel_id {
+                                                               ordered_hops.last_mut().unwrap().1 = features.clone();
+                                                               features_set = true;
+                                                               break;
+                                                       }
+                                               }
+                                       }
+                                       if !features_set {
+                                               if let Some(node) = network_nodes.get(&ordered_hops.last().unwrap().0.pubkey) {
+                                                       if let Some(node_info) = node.announcement_info.as_ref() {
+                                                               ordered_hops.last_mut().unwrap().1 = node_info.features.clone();
+                                                       } else {
+                                                               ordered_hops.last_mut().unwrap().1 = NodeFeatures::empty();
+                                                       }
                                                } else {
-                                                       ordered_hops.last_mut().unwrap().1 = NodeFeatures::empty();
+                                                       // We should be able to fill in features for everything except the last
+                                                       // hop, if the last hop was provided via a BOLT 11 invoice (though we
+                                                       // should be able to extend it further as BOLT 11 does have feature
+                                                       // flags for the last hop node itself).
+                                                       assert!(ordered_hops.last().unwrap().0.pubkey == *payee);
                                                }
-                                       } else {
-                                               // We should be able to fill in features for everything except the last
-                                               // hop, if the last hop was provided via a BOLT 11 invoice (though we
-                                               // should be able to extend it further as BOLT 11 does have feature
-                                               // flags for the last hop node itself).
-                                               assert!(ordered_hops.last().unwrap().0.pubkey == *payee);
                                        }
 
                                        // Means we successfully traversed from the payer to the payee, now
@@ -1109,7 +1148,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                        break 'paths_collection;
                }
 
-               // Step (3).
+               // Step (4).
                // Stop either when the recommended value is reached or if no new path was found in this
                // iteration.
                // In the latter case, making another path finding attempt won't help,
@@ -1133,7 +1172,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                }
        }
 
-       // Step (4).
+       // Step (5).
        if payment_paths.len() == 0 {
                return Err(LightningError{err: "Failed to find a path to the given destination".to_owned(), action: ErrorAction::IgnoreError});
        }
@@ -1154,12 +1193,12 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                let mut cur_route = Vec::<PaymentPath>::new();
                let mut aggregate_route_value_msat = 0;
 
-               // Step (5).
+               // Step (6).
                // TODO: real random shuffle
                // Currently just starts with i_th and goes up to i-1_th in a looped way.
                let cur_payment_paths = [&payment_paths[i..], &payment_paths[..i]].concat();
 
-               // Step (6).
+               // Step (7).
                for payment_path in cur_payment_paths {
                        cur_route.push(payment_path.clone());
                        aggregate_route_value_msat += payment_path.get_value_msat();
@@ -1198,7 +1237,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
 
                                assert!(cur_route.len() > 0);
 
-                               // Step (7).
+                               // Step (8).
                                // Now, subtract the overpaid value from the most-expensive path.
                                // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
                                // so that the sender pays less fees overall. And also htlc_minimum_msat.
@@ -1215,7 +1254,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
                drawn_routes.push(cur_route);
        }
 
-       // Step (8).
+       // Step (9).
        // Select the best route by lowest total fee.
        drawn_routes.sort_by_key(|paths| paths.iter().map(|path| path.get_total_fee_paid_msat()).sum::<u64>());
        let mut selected_paths = Vec::<Vec<RouteHop>>::new();
@@ -1245,7 +1284,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
 
 #[cfg(test)]
 mod tests {
-       use routing::router::{get_route, Route, RouteHint, RouteHintHop, RoutingFees};
+       use routing::router::{get_route, Route, RouteHint, RouteHintHop, RouteHop, RoutingFees};
        use routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
        use chain::transaction::OutPoint;
        use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
@@ -3791,6 +3830,7 @@ mod tests {
                                total_amount_paid_msat += path.last().unwrap().fee_msat;
                        }
                        assert_eq!(total_amount_paid_msat, 200_000);
+                       assert_eq!(route.get_total_fees(), 150_000);
                }
 
        }
@@ -4198,6 +4238,119 @@ mod tests {
                }
        }
 
+       #[test]
+       fn multiple_direct_first_hops() {
+               // Previously we'd only ever considered one first hop path per counterparty.
+               // However, as we don't restrict users to one channel per peer, we really need to support
+               // looking at all first hop paths.
+               // Here we test that we do not ignore all-but-the-last first hop paths per counterparty (as
+               // we used to do by overwriting the `first_hop_targets` hashmap entry) and that we can MPP
+               // route over multiple channels with the same first hop.
+               let secp_ctx = Secp256k1::new();
+               let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+               let logger = Arc::new(test_utils::TestLogger::new());
+               let network_graph = NetworkGraph::new(genesis_block(Network::Testnet).header.block_hash());
+
+               {
+                       let route = get_route(&our_id, &network_graph, &nodes[0], Some(InvoiceFeatures::known()), Some(&[
+                               &get_channel_details(Some(3), nodes[0], InitFeatures::known(), 200_000),
+                               &get_channel_details(Some(2), nodes[0], InitFeatures::known(), 10_000),
+                       ]), &[], 100_000, 42, Arc::clone(&logger)).unwrap();
+                       assert_eq!(route.paths.len(), 1);
+                       assert_eq!(route.paths[0].len(), 1);
+
+                       assert_eq!(route.paths[0][0].pubkey, nodes[0]);
+                       assert_eq!(route.paths[0][0].short_channel_id, 3);
+                       assert_eq!(route.paths[0][0].fee_msat, 100_000);
+               }
+               {
+                       let route = get_route(&our_id, &network_graph, &nodes[0], Some(InvoiceFeatures::known()), Some(&[
+                               &get_channel_details(Some(3), nodes[0], InitFeatures::known(), 50_000),
+                               &get_channel_details(Some(2), nodes[0], InitFeatures::known(), 50_000),
+                       ]), &[], 100_000, 42, Arc::clone(&logger)).unwrap();
+                       assert_eq!(route.paths.len(), 2);
+                       assert_eq!(route.paths[0].len(), 1);
+                       assert_eq!(route.paths[1].len(), 1);
+
+                       assert_eq!(route.paths[0][0].pubkey, nodes[0]);
+                       assert_eq!(route.paths[0][0].short_channel_id, 3);
+                       assert_eq!(route.paths[0][0].fee_msat, 50_000);
+
+                       assert_eq!(route.paths[1][0].pubkey, nodes[0]);
+                       assert_eq!(route.paths[1][0].short_channel_id, 2);
+                       assert_eq!(route.paths[1][0].fee_msat, 50_000);
+               }
+       }
+
+       #[test]
+       fn total_fees_single_path() {
+               let route = Route {
+                       paths: vec![vec![
+                               RouteHop {
+                                       pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
+                                       channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
+                                       short_channel_id: 0, fee_msat: 100, cltv_expiry_delta: 0
+                               },
+                               RouteHop {
+                                       pubkey: PublicKey::from_slice(&hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
+                                       channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
+                                       short_channel_id: 0, fee_msat: 150, cltv_expiry_delta: 0
+                               },
+                               RouteHop {
+                                       pubkey: PublicKey::from_slice(&hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
+                                       channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
+                                       short_channel_id: 0, fee_msat: 225, cltv_expiry_delta: 0
+                               },
+                       ]],
+               };
+
+               assert_eq!(route.get_total_fees(), 250);
+               assert_eq!(route.get_total_amount(), 225);
+       }
+
+       #[test]
+       fn total_fees_multi_path() {
+               let route = Route {
+                       paths: vec![vec![
+                               RouteHop {
+                                       pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
+                                       channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
+                                       short_channel_id: 0, fee_msat: 100, cltv_expiry_delta: 0
+                               },
+                               RouteHop {
+                                       pubkey: PublicKey::from_slice(&hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
+                                       channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
+                                       short_channel_id: 0, fee_msat: 150, cltv_expiry_delta: 0
+                               },
+                       ],vec![
+                               RouteHop {
+                                       pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
+                                       channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
+                                       short_channel_id: 0, fee_msat: 100, cltv_expiry_delta: 0
+                               },
+                               RouteHop {
+                                       pubkey: PublicKey::from_slice(&hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
+                                       channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
+                                       short_channel_id: 0, fee_msat: 150, cltv_expiry_delta: 0
+                               },
+                       ]],
+               };
+
+               assert_eq!(route.get_total_fees(), 200);
+               assert_eq!(route.get_total_amount(), 300);
+       }
+
+       #[test]
+       fn total_empty_route_no_panic() {
+               // In an earlier version, `Route::get_total_fees` and `Route::get_total_amount`
+               // would both panic if the route was completely empty. We test to ensure they
+               // return 0 here, even though it's somewhat nonsensical as a route.
+               let route = Route { paths: Vec::new() };
+
+               assert_eq!(route.get_total_fees(), 0);
+               assert_eq!(route.get_total_amount(), 0);
+       }
+
        #[cfg(not(feature = "no-std"))]
        pub(super) fn random_init_seed() -> u64 {
                // Because the default HashMap in std pulls OS randomness, we can use it as a (bad) RNG.
index ad82ca6cab9017051a64ff5bbb66bf8e56ae1537..2b6a55bcecc66bb54c87b6d7349efb1aa9c6ef81 100644 (file)
 
 use chain::keysinterface::SpendableOutputDescriptor;
 use ln::msgs;
+use ln::msgs::DecodeError;
 use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 use routing::network_graph::NetworkUpdate;
-use util::ser::{Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper};
+use util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper};
+use routing::router::RouteHop;
 
 use bitcoin::blockdata::script::Script;
 use bitcoin::hashes::Hash;
@@ -69,6 +71,60 @@ pub enum PaymentPurpose {
        SpontaneousPayment(PaymentPreimage),
 }
 
+/// The reason the channel was closed. See individual variants for more details.
+#[derive(Clone, Debug, PartialEq)]
+pub enum ClosureReason {
+       /// Closure generated from receiving a peer error message.
+       ///
+       /// Our counterparty may have broadcasted their latest commitment state, and we have
+       /// as well.
+       CounterpartyForceClosed {
+               /// The error which the peer sent us.
+               ///
+               /// The string should be sanitized before it is used (e.g., emitted to logs
+               /// or printed to stdout). Otherwise, a well-crafted error message may exploit
+               /// a security vulnerability in the terminal emulator or the logging subsystem.
+               peer_msg: String,
+       },
+       /// Closure generated from [`ChannelManager::force_close_channel`], called by the user.
+       ///
+       /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+       HolderForceClosed,
+       /// The channel was closed after negotiating a cooperative close and we've now broadcasted
+       /// the cooperative close transaction. Note the shutdown may have been initiated by us.
+       //TODO: split between CounterpartyInitiated/LocallyInitiated
+       CooperativeClosure,
+       /// A commitment transaction was confirmed on chain, closing the channel. Most likely this
+       /// commitment transaction came from our counterparty, but it may also have come from
+       /// a copy of our own `ChannelMonitor`.
+       CommitmentTxConfirmed,
+       /// Closure generated from processing an event, likely an HTLC forward/relay/reception.
+       ProcessingError {
+               /// A developer-readable error message which we generated.
+               err: String,
+       },
+       /// The `PeerManager` informed us that we've disconnected from the peer. We close channels
+       /// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
+       /// peer again in the future or if the peer disconnected before we finished negotiating
+       /// the channel open. The first case may be caused by incompatible features that we, or
+       /// our counterparty, require.
+       //TODO: split between PeerUnconnectable/PeerDisconnected ?
+       DisconnectedPeer,
+       /// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
+       /// the deserialized ChannelManager.
+       OutdatedChannelManager
+}
+
+impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
+       (0, CounterpartyForceClosed) => { (1, peer_msg, required) },
+       (2, HolderForceClosed) => {},
+       (4, CooperativeClosure) => {},
+       (6, CommitmentTxConfirmed) => {},
+       (8, ProcessingError) => { (1, err, required) },
+       (10, DisconnectedPeer) => {},
+       (12, OutdatedChannelManager) => {},
+);
+
 /// An Event which you should probably take some action in response to.
 ///
 /// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use
@@ -93,21 +149,20 @@ pub enum Event {
                user_channel_id: u64,
        },
        /// Indicates we've received money! Just gotta dig out that payment preimage and feed it to
-       /// ChannelManager::claim_funds to get it....
-       /// Note that if the preimage is not known or the amount paid is incorrect, you should call
-       /// ChannelManager::fail_htlc_backwards to free up resources for this HTLC and avoid
+       /// [`ChannelManager::claim_funds`] to get it...
+       /// Note that if the preimage is not known, you should call
+       /// [`ChannelManager::fail_htlc_backwards`] to free up resources for this HTLC and avoid
        /// network congestion.
-       /// The amount paid should be considered 'incorrect' when it is less than or more than twice
-       /// the amount expected.
-       /// If you fail to call either ChannelManager::claim_funds or
-       /// ChannelManager::fail_htlc_backwards within the HTLC's timeout, the HTLC will be
+       /// If you fail to call either [`ChannelManager::claim_funds`] or
+       /// [`ChannelManager::fail_htlc_backwards`] within the HTLC's timeout, the HTLC will be
        /// automatically failed.
+       ///
+       /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
+       /// [`ChannelManager::fail_htlc_backwards`]: crate::ln::channelmanager::ChannelManager::fail_htlc_backwards
        PaymentReceived {
                /// The hash for which the preimage should be handed to the ChannelManager.
                payment_hash: PaymentHash,
-               /// The value, in thousandths of a satoshi, that this payment is for. Note that you must
-               /// compare this to the expected value before accepting the payment (as otherwise you are
-               /// providing proof-of-payment for less than the value you expected!).
+               /// The value, in thousandths of a satoshi, that this payment is for.
                amt: u64,
                /// Information for claiming this received payment, based on whether the purpose of the
                /// payment is to pay an invoice or to send a spontaneous payment.
@@ -116,8 +171,8 @@ pub enum Event {
        /// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target
        /// and we got back the payment preimage for it).
        ///
-       /// Note for MPP payments: in rare cases, this event may be preceded by a `PaymentFailed` event.
-       /// In this situation, you SHOULD treat this payment as having succeeded.
+       /// Note for MPP payments: in rare cases, this event may be preceded by a `PaymentPathFailed`
+       /// event. In this situation, you SHOULD treat this payment as having succeeded.
        PaymentSent {
                /// The preimage to the hash given to ChannelManager::send_payment.
                /// Note that this serves as a payment receipt; if you wish to have such a thing, you must
@@ -130,7 +185,7 @@ pub enum Event {
        },
        /// Indicates an outbound payment we made failed. Probably some intermediary node dropped
        /// something. You may wish to retry with a different route.
-       PaymentFailed {
+       PaymentPathFailed {
                /// The hash which was given to ChannelManager::send_payment.
                payment_hash: PaymentHash,
                /// Indicates the payment was rejected for some reason by the recipient. This implies that
@@ -150,6 +205,8 @@ pub enum Event {
                /// failed. This will be set to false if (1) this is an MPP payment and (2) other parts of the
                /// larger MPP payment were still in flight when this event was generated.
                all_paths_failed: bool,
+               /// The payment path that failed.
+               path: Vec<RouteHop>,
 #[cfg(test)]
                error_code: Option<u16>,
 #[cfg(test)]
@@ -194,6 +251,14 @@ pub enum Event {
                /// transaction.
                claim_from_onchain_tx: bool,
        },
+       /// Used to indicate that a channel with the given `channel_id` is in the process of closure.
+       ChannelClosed {
+               /// The channel_id of the channel which has been closed. Note that on-chain transactions
+               /// resolving the channel are likely still awaiting confirmation.
+               channel_id: [u8; 32],
+               /// The reason the channel was closed.
+               reason: ClosureReason
+       }
 }
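A sketch of consuming the new variant from an event handler; the match arms and their comments are illustrative, not prescribed by this diff:

```rust
// Hypothetical handler branch for the new event.
match event {
	Event::ChannelClosed { channel_id, reason } => {
		match reason {
			ClosureReason::CooperativeClosure => { /* expected shutdown */ },
			ClosureReason::CounterpartyForceClosed { peer_msg } => {
				// peer_msg is untrusted input: sanitize it before logging.
				let _ = peer_msg;
			},
			other => {
				// Persist `other` and `channel_id` for later inspection.
				let _ = (other, channel_id);
			},
		}
	},
	_ => {},
}
```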
 
 impl Writeable for Event {
@@ -234,7 +299,8 @@ impl Writeable for Event {
                                        (1, payment_hash, required),
                                });
                        },
-                       &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref all_paths_failed,
+                       &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update,
+                                                   ref all_paths_failed, ref path,
                                #[cfg(test)]
                                ref error_code,
                                #[cfg(test)]
@@ -250,13 +316,13 @@ impl Writeable for Event {
                                        (1, network_update, option),
                                        (2, rejected_by_dest, required),
                                        (3, all_paths_failed, required),
+                                       (5, path, vec_type),
                                });
                        },
                        &Event::PendingHTLCsForwardable { time_forwardable: _ } => {
                                4u8.write(writer)?;
-                               write_tlv_fields!(writer, {});
-                               // We don't write the time_fordwardable out at all, as we presume when the user
-                               // deserializes us at least that much time has elapsed.
+                               // Note that we now ignore these on the read end as we'll re-generate them in
+                               // ChannelManager, we write them here only for backwards compatibility.
                        },
                        &Event::SpendableOutputs { ref outputs } => {
                                5u8.write(writer)?;
@@ -271,6 +337,16 @@ impl Writeable for Event {
                                        (2, claim_from_onchain_tx, required),
                                });
                        },
+                       &Event::ChannelClosed { ref channel_id, ref reason } => {
+                               9u8.write(writer)?;
+                               write_tlv_fields!(writer, {
+                                       (0, channel_id, required),
+                                       (2, reason, required)
+                               });
+                       },
+                       // Note that, going forward, all new events must only write data inside of
+                       // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
+                       // data via `write_tlv_fields`.
                }
                Ok(())
        }
@@ -278,6 +354,8 @@ impl Writeable for Event {
 impl MaybeReadable for Event {
        fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
                match Readable::read(reader)? {
+                       // Note that we do not write a length-prefixed TLV for FundingGenerationReady events,
+                       // unlike all other events, thus we return immediately here.
                        0u8 => Ok(None),
                        1u8 => {
                                let f = || {
@@ -340,17 +418,20 @@ impl MaybeReadable for Event {
                                        let mut rejected_by_dest = false;
                                        let mut network_update = None;
                                        let mut all_paths_failed = Some(true);
+                                       let mut path: Option<Vec<RouteHop>> = Some(vec![]);
                                        read_tlv_fields!(reader, {
                                                (0, payment_hash, required),
                                                (1, network_update, ignorable),
                                                (2, rejected_by_dest, required),
                                                (3, all_paths_failed, option),
+                                               (5, path, vec_type),
                                        });
-                                       Ok(Some(Event::PaymentFailed {
+                                       Ok(Some(Event::PaymentPathFailed {
                                                payment_hash,
                                                rejected_by_dest,
                                                network_update,
                                                all_paths_failed: all_paths_failed.unwrap(),
+                                               path: path.unwrap(),
                                                #[cfg(test)]
                                                error_code,
                                                #[cfg(test)]
@@ -359,15 +440,7 @@ impl MaybeReadable for Event {
                                };
                                f()
                        },
-                       4u8 => {
-                               let f = || {
-                                       read_tlv_fields!(reader, {});
-                                       Ok(Some(Event::PendingHTLCsForwardable {
-                                               time_forwardable: Duration::from_secs(0)
-                                       }))
-                               };
-                               f()
-                       },
+                       4u8 => Ok(None),
                        5u8 => {
                                let f = || {
                                        let mut outputs = VecReadWrapper(Vec::new());
@@ -390,8 +463,30 @@ impl MaybeReadable for Event {
                                };
                                f()
                        },
+                       9u8 => {
+                               let mut channel_id = [0; 32];
+                               let mut reason = None;
+                               read_tlv_fields!(reader, {
+                                       (0, channel_id, required),
+                                       (2, reason, ignorable),
+                               });
+                               if reason.is_none() { return Ok(None); }
+                               Ok(Some(Event::ChannelClosed { channel_id, reason: reason.unwrap() }))
+                       },
                        // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
-                       x if x % 2 == 1 => Ok(None),
+                       // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
+                       // reads.
+                       x if x % 2 == 1 => {
+                               // If the event is of unknown type, assume it was written with `write_tlv_fields`,
+                               // which prefixes the whole thing with a length BigSize. Because the event is
+                               // odd-type unknown, we should treat it as `Ok(None)` even if it has some TLV
+                               // fields that are even. Thus, we avoid using `read_tlv_fields` and simply read
+                               // exactly the number of bytes specified, ignoring them entirely.
+                               let tlv_len: BigSize = Readable::read(reader)?;
+                               FixedLengthReader::new(reader, tlv_len.0)
+                                       .eat_remaining().map_err(|_| msgs::DecodeError::ShortRead)?;
+                               Ok(None)
+                       },
                        _ => Err(msgs::DecodeError::InvalidValue)
                }
        }
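To make the forward-compatibility path concrete, here is a sketch of the bytes the odd-type branch consumes (hypothetical wire data; assumes `MaybeReadable` is in scope):

```rust
// [event type = 11 (unknown, odd)][BigSize length = 3][3 payload bytes]
let bytes = [11u8, 3, 0xde, 0xad, 0xbe];
let mut reader = &bytes[..];
match <Event as MaybeReadable>::read(&mut reader) {
	// Unknown odd events deserialize to Ok(None), with the reader left
	// positioned just past the skipped payload.
	Ok(None) => assert!(reader.is_empty()),
	_ => panic!("expected the unknown odd event to be ignored"),
}
```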
index a6991c5c3d0d2a953dea4ba11993c446e37385ee..47bdf04f8750ac8d2562f032c7232d997f36c3b4 100644 (file)
@@ -879,8 +879,29 @@ impl<A: Writeable, B: Writeable, C: Writeable> Writeable for (A, B, C) {
        }
 }
 
+impl Writeable for () {
+       fn write<W: Writer>(&self, _: &mut W) -> Result<(), io::Error> {
+               Ok(())
+       }
+}
 impl Readable for () {
        fn read<R: Read>(_r: &mut R) -> Result<Self, DecodeError> {
                Ok(())
        }
 }
+
+impl Writeable for String {
+       #[inline]
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               (self.len() as u16).write(w)?;
+               w.write_all(self.as_bytes())
+       }
+}
+impl Readable for String {
+       #[inline]
+       fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let v: Vec<u8> = Readable::read(r)?;
+               let ret = String::from_utf8(v).map_err(|_| DecodeError::InvalidValue)?;
+               Ok(ret)
+       }
+}
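A round-trip sketch for the new `String` impls, assuming `Writeable`'s `encode` helper that serializes into a fresh `Vec<u8>`:

```rust
// Written form: a u16 big-endian length prefix, then the raw UTF-8 bytes.
let msg = "channel closed".to_string();
let bytes = msg.encode();
assert_eq!(&bytes[..2], &14u16.to_be_bytes()[..]);
let mut reader = &bytes[..];
let decoded: String = Readable::read(&mut reader).unwrap();
assert_eq!(decoded, msg);
```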