Communication about the development of LDK and `rust-lightning` happens
primarily on the [LDK Discord](https://discord.gg/5AcknnMfBw) in the `#ldk-dev`
-channel. Additionally, live LDK devevelopment meetings are held every other
-Monday 19:00 UTC in the [LDK Dev Jitsi Meeting
+channel. Additionally, live LDK development meetings are held every other
+Monday 17:00 UTC in the [LDK Dev Jitsi Meeting
Room](https://meet.jit.si/ldkdevmeeting). Upcoming events can be found in the
[LDK calendar](https://calendar.google.com/calendar/embed?src=c_e6fv6vlshbpoob2mmbvblkkoj4%40group.calendar.google.com).
Design Goal
-----------
-The goal is to provide a full-featured but also incredibly flexible Lightning
-implementation, allowing the user to decide how they wish to use it. With that
+The goal is to provide a fully-featured and incredibly flexible Lightning
+implementation, allowing users to decide how they wish to use it. With that
in mind, everything should be exposed via simple, composable APIs. More
information about `rust-lightning`'s flexibility is provided in the `About`
section above.
PaymentSendFailure::PathParameterError(per_path_results) => {
for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err); } }
},
- PaymentSendFailure::AllFailedRetrySafe(per_path_results) => {
+ PaymentSendFailure::AllFailedResendSafe(per_path_results) => {
for api_err in per_path_results { check_api_err(api_err); }
},
PaymentSendFailure::PartialFailure { results, .. } => {
for res in results { if let Err(api_err) = res { check_api_err(api_err); } }
},
+ PaymentSendFailure::DuplicatePayment => panic!(),
}
}
SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, ctr]).unwrap(),
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, ctr],
channel_value_satoshis,
- [0; 32]
+ [0; 32],
)
} else {
InMemorySigner::new(
SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, ctr]).unwrap(),
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, ctr],
channel_value_satoshis,
- [0; 32]
+ [0; 32],
)
})
}
// Adding new calls to `KeysInterface::get_secure_random_bytes` during startup can change all the
// keys subsequently generated in this test. Rather than regenerating all the messages manually,
// it's easier to just increment the counter here so the keys don't change.
- keys_manager.counter.fetch_sub(2, Ordering::AcqRel);
+ keys_manager.counter.fetch_sub(3, Ordering::AcqRel);
let our_id = PublicKey::from_secret_key(&Secp256k1::signing_only(), &keys_manager.get_node_secret(Recipient::Node).unwrap());
let network_graph = Arc::new(NetworkGraph::new(genesis_block(network).block_hash(), Arc::clone(&logger)));
let gossip_sync = Arc::new(P2PGossipSync::new(Arc::clone(&network_graph), None, Arc::clone(&logger)));
// 030012 - inbound read from peer id 0 of len 18
// 05ac 03000000000000000000000000000000 - message header indicating message length 1452
// 0300ff - inbound read from peer id 0 of len 255
- // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000000 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
+ // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000000 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 11 020203e8 0401a0 060800000e0000010000 0a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
// 0300ff - inbound read from peer id 0 of len 255
// ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
// 0300ff - inbound read from peer id 0 of len 255
// 0300ff - inbound read from peer id 0 of len 255
// ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
// 0300c1 - inbound read from peer id 0 of len 193
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 4e00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
//
// 030012 - inbound read from peer id 0 of len 18
// 0064 03000000000000000000000000000000 - message header indicating message length 100
// 030012 - inbound read from peer id 0 of len 18
// 05ac 03000000000000000000000000000000 - message header indicating message length 1452
// 0300ff - inbound read from peer id 0 of len 255
- // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
+ // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 11 020203e8 0401a0 060800000e0000010000 0a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
// 0300ff - inbound read from peer id 0 of len 255
// ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
// 0300ff - inbound read from peer id 0 of len 255
// 0300ff - inbound read from peer id 0 of len 255
// ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
// 0300c1 - inbound read from peer id 0 of len 193
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 4e00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
//
// - now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0
// 030012 - inbound read from peer id 0 of len 18
// 030012 - inbound read from peer id 0 of len 18
// 05ac 03000000000000000000000000000000 - message header indicating message length 1452
// 0300ff - inbound read from peer id 0 of len 255
- // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000002 00000000000b0838 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000927c0000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
+ // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000002 00000000000b0838 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 12 02030927c0 0401a0 060800000e0000010000 0a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
// 0300ff - inbound read from peer id 0 of len 255
// ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
// 0300ff - inbound read from peer id 0 of len 255
// 0300ff - inbound read from peer id 0 of len 255
// ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
// 0300c1 - inbound read from peer id 0 of len 193
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 4b00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 5300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
//
// 030012 - inbound read from peer id 0 of len 18
// 00a4 03000000000000000000000000000000 - message header indicating message length 164
// - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10)
let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) });
- super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012000a0300000000000000000000000000000003001a00100002200000022000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d000000000000000000000000000000000000000000000000000000000000000208000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001030132000300000000000000000000000000000000000000000000000
0000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112000a0100000000000000000000000000000003011a0010000220000002200001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500026600000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c301000000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000300120063030
0000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d00000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000927c0000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4b000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b000000000000000000000000000000000000000000000000000000000000001
4c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
+ super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012000a0300000000000000000000000000000003001a00100002200000022000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d000000000000000000000000000000000000000000000000000000000000000208000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001030132000300000000000000000000000000000000000000000000000
0000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112000a0100000000000000000000000000000003011a0010000220000002200001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500026600000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c301000000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000300120063030
0000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d00000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005551202030927c00401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff53000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b000000000000000000000000000000000000000000000000000000000000001
4c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
let log_entries = logger.lines.lock().unwrap();
assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1
user_channel_id: 0, inbound_capacity_msat: 0,
unspendable_punishment_reserve: None,
confirmations_required: None,
+ confirmations: None,
force_close_spend_delay: None,
is_outbound: true, is_channel_ready: true,
is_usable: true, is_public: true,
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
-use lightning::chain::keysinterface::{Sign, KeysInterface};
+use lightning::chain::keysinterface::KeysInterface;
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
}
}
-/// Decorates an [`EventHandler`] with common functionality provided by standard [`EventHandler`]s.
-struct DecoratingEventHandler<
- 'a,
- E: EventHandler,
- PGS: Deref<Target = P2PGossipSync<G, A, L>>,
- RGS: Deref<Target = RapidGossipSync<G, L>>,
- G: Deref<Target = NetworkGraph<L>>,
- A: Deref,
- L: Deref,
->
-where A::Target: chain::Access, L::Target: Logger {
- event_handler: E,
- gossip_sync: &'a GossipSync<PGS, RGS, G, A, L>,
-}
-
-impl<
- 'a,
- E: EventHandler,
- PGS: Deref<Target = P2PGossipSync<G, A, L>>,
- RGS: Deref<Target = RapidGossipSync<G, L>>,
- G: Deref<Target = NetworkGraph<L>>,
- A: Deref,
- L: Deref,
-> EventHandler for DecoratingEventHandler<'a, E, PGS, RGS, G, A, L>
-where A::Target: chain::Access, L::Target: Logger {
- fn handle_event(&self, event: &Event) {
- if let Some(network_graph) = self.gossip_sync.network_graph() {
- network_graph.handle_event(event);
+fn handle_network_graph_update<L: Deref>(
+ network_graph: &NetworkGraph<L>, event: &Event
+) where L::Target: Logger {
+ if let Event::PaymentPathFailed { ref network_update, .. } = event {
+ if let Some(network_update) = network_update {
+ network_graph.handle_network_update(&network_update);
}
- self.event_handler.handle_event(event);
}
}
macro_rules! define_run_body {
- ($persister: ident, $event_handler: ident, $chain_monitor: ident, $channel_manager: ident,
+ ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
+ $channel_manager: ident, $process_channel_manager_events: expr,
$gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
$loop_exit_check: expr, $await: expr)
=> { {
- let event_handler = DecoratingEventHandler {
- event_handler: $event_handler,
- gossip_sync: &$gossip_sync,
- };
-
log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
$channel_manager.timer_tick_occurred();
let mut have_pruned = false;
loop {
- $channel_manager.process_pending_events(&event_handler);
- $chain_monitor.process_pending_events(&event_handler);
+ $process_channel_manager_events;
+ $process_chain_monitor_events;
// Note that the PeerManager::process_events may block on ChannelManager's locks,
// hence it comes last here. When the ChannelManager finishes whatever it's doing,
		// we resume our normal cadence.
if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } {
// The network graph must not be pruned while rapid sync completion is pending
- log_trace!($logger, "Assessing prunability of network graph");
if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
+ log_trace!($logger, "Pruning and persisting network graph.");
network_graph.remove_stale_channels_and_tracking();
if let Err(e) = $persister.persist_graph(network_graph) {
last_prune_call = Instant::now();
have_pruned = true;
- } else {
- log_trace!($logger, "Not pruning network graph, either due to pending rapid gossip sync or absence of a prunable graph.");
}
}
/// Processes background events in a future.
///
/// `sleeper` should return a future which completes in the given amount of time and returns a
-/// boolean indicating whether the background processing should continue. Once `sleeper` returns a
-/// future which outputs false, the loop will exit and this function's future will complete.
+/// boolean indicating whether the background processing should exit. Once `sleeper` returns a
+/// future which outputs true, the loop will exit and this function's future will complete.
///
/// See [`BackgroundProcessor::start`] for information on which actions this handles.
#[cfg(feature = "futures")]
pub async fn process_events_async<
'a,
- Signer: 'static + Sign,
CA: 'static + Deref + Send + Sync,
CF: 'static + Deref + Send + Sync,
CW: 'static + Deref + Send + Sync,
CMH: 'static + Deref + Send + Sync,
RMH: 'static + Deref + Send + Sync,
OMH: 'static + Deref + Send + Sync,
- EH: 'static + EventHandler + Send,
+ EventHandlerFuture: core::future::Future<Output = ()>,
+ EventHandler: Fn(Event) -> EventHandlerFuture,
PS: 'static + Deref + Send,
- M: 'static + Deref<Target = ChainMonitor<Signer, CF, T, F, L, P>> + Send + Sync,
+ M: 'static + Deref<Target = ChainMonitor<<K::Target as KeysInterface>::Signer, CF, T, F, L, P>> + Send + Sync,
CM: 'static + Deref<Target = ChannelManager<CW, T, K, F, L>> + Send + Sync,
PGS: 'static + Deref<Target = P2PGossipSync<G, CA, L>> + Send + Sync,
RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
SleepFuture: core::future::Future<Output = bool>,
Sleeper: Fn(Duration) -> SleepFuture
>(
- persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
+ persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
gossip_sync: GossipSync<PGS, RGS, G, CA, L>, peer_manager: PM, logger: L, scorer: Option<S>,
sleeper: Sleeper,
) -> Result<(), std::io::Error>
where
CA::Target: 'static + chain::Access,
CF::Target: 'static + chain::Filter,
- CW::Target: 'static + chain::Watch<Signer>,
+ CW::Target: 'static + chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: 'static + BroadcasterInterface,
- K::Target: 'static + KeysInterface<Signer = Signer>,
+ K::Target: 'static + KeysInterface,
F::Target: 'static + FeeEstimator,
L::Target: 'static + Logger,
- P::Target: 'static + Persist<Signer>,
+ P::Target: 'static + Persist<<K::Target as KeysInterface>::Signer>,
CMH::Target: 'static + ChannelMessageHandler,
OMH::Target: 'static + OnionMessageHandler,
RMH::Target: 'static + RoutingMessageHandler,
UMH::Target: 'static + CustomMessageHandler,
- PS::Target: 'static + Persister<'a, Signer, CW, T, K, F, L, SC>,
+ PS::Target: 'static + Persister<'a, CW, T, K, F, L, SC>,
{
- let mut should_continue = true;
- define_run_body!(persister, event_handler, chain_monitor, channel_manager,
- gossip_sync, peer_manager, logger, scorer, should_continue, {
+ let mut should_break = true;
+ let async_event_handler = |event| {
+ let network_graph = gossip_sync.network_graph();
+ let event_handler = &event_handler;
+ async move {
+ if let Some(network_graph) = network_graph {
+ handle_network_graph_update(network_graph, &event)
+ }
+ event_handler(event).await;
+ }
+ };
+ define_run_body!(persister,
+ chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
+ channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
+ gossip_sync, peer_manager, logger, scorer, should_break, {
select_biased! {
_ = channel_manager.get_persistable_update_future().fuse() => true,
- cont = sleeper(Duration::from_millis(100)).fuse() => {
- should_continue = cont;
+ exit = sleeper(Duration::from_millis(100)).fuse() => {
+ should_break = exit;
false
}
}
/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
pub fn start<
'a,
- Signer: 'static + Sign,
CA: 'static + Deref + Send + Sync,
CF: 'static + Deref + Send + Sync,
CW: 'static + Deref + Send + Sync,
RMH: 'static + Deref + Send + Sync,
EH: 'static + EventHandler + Send,
PS: 'static + Deref + Send,
- M: 'static + Deref<Target = ChainMonitor<Signer, CF, T, F, L, P>> + Send + Sync,
+ M: 'static + Deref<Target = ChainMonitor<<K::Target as KeysInterface>::Signer, CF, T, F, L, P>> + Send + Sync,
CM: 'static + Deref<Target = ChannelManager<CW, T, K, F, L>> + Send + Sync,
PGS: 'static + Deref<Target = P2PGossipSync<G, CA, L>> + Send + Sync,
RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
where
CA::Target: 'static + chain::Access,
CF::Target: 'static + chain::Filter,
- CW::Target: 'static + chain::Watch<Signer>,
+ CW::Target: 'static + chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: 'static + BroadcasterInterface,
- K::Target: 'static + KeysInterface<Signer = Signer>,
+ K::Target: 'static + KeysInterface,
F::Target: 'static + FeeEstimator,
L::Target: 'static + Logger,
- P::Target: 'static + Persist<Signer>,
+ P::Target: 'static + Persist<<K::Target as KeysInterface>::Signer>,
CMH::Target: 'static + ChannelMessageHandler,
OMH::Target: 'static + OnionMessageHandler,
RMH::Target: 'static + RoutingMessageHandler,
UMH::Target: 'static + CustomMessageHandler,
- PS::Target: 'static + Persister<'a, Signer, CW, T, K, F, L, SC>,
+ PS::Target: 'static + Persister<'a, CW, T, K, F, L, SC>,
{
let stop_thread = Arc::new(AtomicBool::new(false));
let stop_thread_clone = stop_thread.clone();
let handle = thread::spawn(move || -> Result<(), std::io::Error> {
- define_run_body!(persister, event_handler, chain_monitor, channel_manager,
+ let event_handler = |event| {
+ let network_graph = gossip_sync.network_graph();
+ if let Some(network_graph) = network_graph {
+ handle_network_graph_update(network_graph, &event)
+ }
+ event_handler.handle_event(event);
+ };
+ define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
+ channel_manager, channel_manager.process_pending_events(&event_handler),
gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
channel_manager.await_persistable_update_timeout(Duration::from_millis(100)))
});
begin_open_channel!($node_a, $node_b, $channel_value);
let events = $node_a.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
- let (temporary_channel_id, tx) = handle_funding_generation_ready!(&events[0], $channel_value);
+ let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
end_open_channel!($node_a, $node_b, temporary_channel_id, tx);
tx
}}
macro_rules! handle_funding_generation_ready {
($event: expr, $channel_value: expr) => {{
match $event {
- &Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
+ Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
assert_eq!(channel_value_satoshis, $channel_value);
assert_eq!(user_channel_id, 42);
// Initiate the background processors to watch each node.
let data_dir = nodes[0].persister.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
- let event_handler = |_: &_| {};
+ let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
macro_rules! check_persisted_data {
let nodes = create_nodes(1, "test_timer_tick_called".to_string());
let data_dir = nodes[0].persister.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
- let event_handler = |_: &_| {};
+ let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
loop {
let log_entries = nodes[0].logger.lines.lock().unwrap();
let data_dir = nodes[0].persister.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
- let event_handler = |_: &_| {};
+ let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
match bg_processor.join() {
Ok(_) => panic!("Expected error persisting manager"),
let nodes = create_nodes(2, "test_persist_network_graph_error".to_string());
let data_dir = nodes[0].persister.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
- let event_handler = |_: &_| {};
+ let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
match bg_processor.stop() {
let nodes = create_nodes(2, "test_persist_scorer_error".to_string());
let data_dir = nodes[0].persister.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
- let event_handler = |_: &_| {};
+ let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
match bg_processor.stop() {
// Set up a background event handler for FundingGenerationReady events.
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
- let event_handler = move |event: &Event| match event {
+ let event_handler = move |event: Event| match event {
Event::FundingGenerationReady { .. } => sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
Event::ChannelReady { .. } => {},
_ => panic!("Unexpected event: {:?}", event),
// Set up a background event handler for SpendableOutputs events.
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
- let event_handler = move |event: &Event| match event {
+ let event_handler = move |event: Event| match event {
Event::SpendableOutputs { .. } => sender.send(event.clone()).unwrap(),
Event::ChannelReady { .. } => {},
Event::ChannelClosed { .. } => {},
let nodes = create_nodes(2, "test_scorer_persistence".to_string());
let data_dir = nodes[0].persister.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
- let event_handler = |_: &_| {};
+ let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
loop {
assert!(original_graph_description.contains("42: features: 0000, node_one:"));
assert_eq!(network_graph.read_only().channels().len(), 1);
- let event_handler = |_: &_| {};
+ let event_handler = |_: _| {};
let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
loop {
let log_entries = nodes[0].logger.lines.lock().unwrap();
- let expected_log_a = "Assessing prunability of network graph".to_string();
- let expected_log_b = "Not pruning network graph, either due to pending rapid gossip sync or absence of a prunable graph.".to_string();
- if log_entries.get(&("lightning_background_processor".to_string(), expected_log_a)).is_some() &&
- log_entries.get(&("lightning_background_processor".to_string(), expected_log_b)).is_some() {
+ let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
+ if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
+ .unwrap_or(&0) > 1
+ {
+ // Wait until the loop has gone around at least twice.
break
}
}
let data_dir = nodes[0].persister.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger), random_seed_bytes, Arc::clone(&nodes[0].scorer));
- let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, Arc::clone(&nodes[0].logger), |_: &_| {}, Retry::Attempts(2)));
+ let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, Arc::clone(&nodes[0].logger), |_: _| {}, Retry::Attempts(2)));
let event_handler = Arc::clone(&invoice_payer);
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
assert!(bg_processor.stop().is_ok());
///
/// async fn init_sync<
/// B: BlockSource,
-/// K: KeysInterface<Signer = S>,
-/// S: keysinterface::Sign,
+/// K: KeysInterface,
/// T: BroadcasterInterface,
/// F: FeeEstimator,
/// L: Logger,
/// C: chain::Filter,
-/// P: chainmonitor::Persist<S>,
+/// P: chainmonitor::Persist<K::Signer>,
/// >(
/// block_source: &B,
-/// chain_monitor: &ChainMonitor<S, &C, &T, &F, &L, &P>,
+/// chain_monitor: &ChainMonitor<K::Signer, &C, &T, &F, &L, &P>,
/// config: UserConfig,
/// keys_manager: &K,
/// tx_broadcaster: &T,
/// ) {
/// // Read a serialized channel monitor paired with the block hash when it was persisted.
/// let serialized_monitor = "...";
-/// let (monitor_block_hash, mut monitor) = <(BlockHash, ChannelMonitor<S>)>::read(
+/// let (monitor_block_hash, mut monitor) = <(BlockHash, ChannelMonitor<K::Signer>)>::read(
/// &mut Cursor::new(&serialized_monitor), keys_manager).unwrap();
///
/// // Read the channel manager paired with the block hash when it was persisted.
/// config,
/// vec![&mut monitor],
/// );
-/// <(BlockHash, ChannelManager<&ChainMonitor<S, &C, &T, &F, &L, &P>, &T, &K, &F, &L>)>::read(
+/// <(BlockHash, ChannelManager<&ChainMonitor<K::Signer, &C, &T, &F, &L, &P>, &T, &K, &F, &L>)>::read(
/// &mut Cursor::new(&serialized_manager), read_args).unwrap()
/// };
///
//! # &self, route: &Route, payment_id: PaymentId
//! # ) -> Result<(), PaymentSendFailure> { unimplemented!() }
//! # fn abandon_payment(&self, payment_id: PaymentId) { unimplemented!() }
+//! # fn inflight_htlcs(&self) -> InFlightHtlcs { unimplemented!() }
//! # }
//! #
//! # struct FakeRouter {}
//! # }
//! #
//! # fn main() {
-//! let event_handler = |event: &Event| {
+//! let event_handler = |event: Event| {
//! match event {
//! Event::PaymentPathFailed { .. } => println!("payment failed after retries"),
//! Event::PaymentSent { .. } => println!("payment successful"),
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChannelDetails, PaymentId, PaymentSendFailure};
use lightning::ln::msgs::LightningError;
-use lightning::routing::gossip::NodeId;
use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, Router};
-use lightning::util::errors::APIError;
use lightning::util::events::{Event, EventHandler};
use lightning::util::logger::Logger;
use crate::time_utils::Time;
use core::fmt;
use core::fmt::{Debug, Display, Formatter};
+use core::future::Future;
use core::ops::Deref;
use core::time::Duration;
#[cfg(feature = "std")]
#[cfg(feature = "no-std")]
type ConfiguredTime = time_utils::Eternity;
+/// Sealed trait with a blanket implementation to allow both sync and async implementations of event
+/// handling to exist within the InvoicePayer.
+mod sealed {
+ pub trait BaseEventHandler {}
+ impl<T> BaseEventHandler for T {}
+}
+
/// (C-not exported) generally all users should use the [`InvoicePayer`] type alias.
-pub struct InvoicePayerUsingTime<P: Deref, R: ScoringRouter, L: Deref, E: EventHandler, T: Time>
-where
+pub struct InvoicePayerUsingTime<
+ P: Deref,
+ R: ScoringRouter,
+ L: Deref,
+ E: sealed::BaseEventHandler,
+ T: Time
+> where
P::Target: Payer,
L::Target: Logger,
{
logger: L,
event_handler: E,
/// Caches the overall attempts at making a payment, which is updated prior to retrying.
- payment_cache: Mutex<HashMap<PaymentHash, PaymentInfo<T>>>,
+ payment_cache: Mutex<HashMap<PaymentHash, PaymentAttempts<T>>>,
retry: Retry,
}
-/// Used by [`InvoicePayerUsingTime::payment_cache`] to track the payments that are either
-/// currently being made, or have outstanding paths that need retrying.
-struct PaymentInfo<T: Time> {
- attempts: PaymentAttempts<T>,
- paths: Vec<Vec<RouteHop>>,
-}
-
-impl<T: Time> PaymentInfo<T> {
- fn new() -> Self {
- PaymentInfo {
- attempts: PaymentAttempts::new(),
- paths: vec![],
- }
- }
-}
-
/// Storing minimal payment attempts information required for determining if an outbound payment can
/// be retried.
#[derive(Clone, Copy)]
/// Signals that no further retries for the given payment will occur.
fn abandon_payment(&self, payment_id: PaymentId);
+
+ /// Construct an [`InFlightHtlcs`] containing information about currently used up liquidity
+ /// across payments.
+ fn inflight_htlcs(&self) -> InFlightHtlcs;
}
/// A trait defining behavior for a [`Router`] implementation that also supports scoring channels
Sending(PaymentSendFailure),
}
-impl<P: Deref, R: ScoringRouter, L: Deref, E: EventHandler, T: Time> InvoicePayerUsingTime<P, R, L, E, T>
+impl<P: Deref, R: ScoringRouter, L: Deref, E: sealed::BaseEventHandler, T: Time>
+ InvoicePayerUsingTime<P, R, L, E, T>
where
P::Target: Payer,
L::Target: Logger,
let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner());
match self.payment_cache.lock().unwrap().entry(payment_hash) {
hash_map::Entry::Occupied(_) => return Err(PaymentError::Invoice("payment pending")),
- hash_map::Entry::Vacant(entry) => entry.insert(PaymentInfo::new()),
+ hash_map::Entry::Vacant(entry) => entry.insert(PaymentAttempts::new()),
};
let payment_secret = Some(invoice.payment_secret().clone());
) -> Result<(), PaymentError> {
match self.payment_cache.lock().unwrap().entry(payment_hash) {
hash_map::Entry::Occupied(_) => return Err(PaymentError::Invoice("payment pending")),
- hash_map::Entry::Vacant(entry) => entry.insert(PaymentInfo::new()),
+ hash_map::Entry::Vacant(entry) => entry.insert(PaymentAttempts::new()),
};
let route_params = RouteParameters {
let payer = self.payer.node_id();
let first_hops = self.payer.first_hops();
- let inflight_htlcs = self.create_inflight_map();
+ let inflight_htlcs = self.payer.inflight_htlcs();
let route = self.router.find_route(
&payer, ¶ms, Some(&first_hops.iter().collect::<Vec<_>>()), inflight_htlcs
).map_err(|e| PaymentError::Routing(e))?;
match send_payment(&route) {
- Ok(()) => {
- for path in route.paths {
- self.process_path_inflight_htlcs(payment_hash, path);
- }
- Ok(())
- },
+ Ok(()) => Ok(()),
Err(e) => match e {
PaymentSendFailure::ParameterError(_) => Err(e),
PaymentSendFailure::PathParameterError(_) => Err(e),
- PaymentSendFailure::AllFailedRetrySafe(_) => {
+ PaymentSendFailure::DuplicatePayment => Err(e),
+ PaymentSendFailure::AllFailedResendSafe(_) => {
let mut payment_cache = self.payment_cache.lock().unwrap();
- let payment_info = payment_cache.get_mut(&payment_hash).unwrap();
- payment_info.attempts.count += 1;
- if self.retry.is_retryable_now(&payment_info.attempts) {
+ let payment_attempts = payment_cache.get_mut(&payment_hash).unwrap();
+ payment_attempts.count += 1;
+ if self.retry.is_retryable_now(payment_attempts) {
core::mem::drop(payment_cache);
Ok(self.pay_internal(params, payment_hash, send_payment)?)
} else {
Err(e)
}
},
- PaymentSendFailure::PartialFailure { failed_paths_retry, payment_id, results } => {
- // If a `PartialFailure` event returns a result that is an `Ok()`, it means that
- // part of our payment is retried. When we receive `MonitorUpdateInProgress`, it
- // means that we are still waiting for our channel monitor update to be completed.
- for (result, path) in results.iter().zip(route.paths.into_iter()) {
- match result {
- Ok(_) | Err(APIError::MonitorUpdateInProgress) => {
- self.process_path_inflight_htlcs(payment_hash, path);
- },
- _ => {},
- }
- }
-
+ PaymentSendFailure::PartialFailure { failed_paths_retry, payment_id, .. } => {
if let Some(retry_data) = failed_paths_retry {
// Some paths were sent, even if we failed to send the full MPP value our
// recipient may misbehave and claim the funds, at which point we have to
}.map_err(|e| PaymentError::Sending(e))
}
- // Takes in a path to have its information stored in `payment_cache`. This is done for paths
- // that are pending retry.
- fn process_path_inflight_htlcs(&self, payment_hash: PaymentHash, path: Vec<RouteHop>) {
- self.payment_cache.lock().unwrap().entry(payment_hash)
- .or_insert_with(|| PaymentInfo::new())
- .paths.push(path);
- }
-
- // Find the path we want to remove in `payment_cache`. If it doesn't exist, do nothing.
- fn remove_path_inflight_htlcs(&self, payment_hash: PaymentHash, path: &Vec<RouteHop>) {
- self.payment_cache.lock().unwrap().entry(payment_hash)
- .and_modify(|payment_info| {
- if let Some(idx) = payment_info.paths.iter().position(|p| p == path) {
- payment_info.paths.swap_remove(idx);
- }
- });
- }
-
fn retry_payment(
&self, payment_id: PaymentId, payment_hash: PaymentHash, params: &RouteParameters
) -> Result<(), ()> {
- let attempts = self.payment_cache.lock().unwrap().entry(payment_hash)
- .and_modify(|info| info.attempts.count += 1 )
- .or_insert_with(|| PaymentInfo {
- attempts: PaymentAttempts {
+ let attempts =
+ *self.payment_cache.lock().unwrap().entry(payment_hash)
+ .and_modify(|attempts| attempts.count += 1)
+ .or_insert(PaymentAttempts {
count: 1,
- first_attempted_at: T::now(),
- },
- paths: vec![],
- }).attempts;
+ first_attempted_at: T::now()
+ });
if !self.retry.is_retryable_now(&attempts) {
log_trace!(self.logger, "Payment {} exceeded maximum attempts; not retrying ({})", log_bytes!(payment_hash.0), attempts);
let payer = self.payer.node_id();
let first_hops = self.payer.first_hops();
- let inflight_htlcs = self.create_inflight_map();
+ let inflight_htlcs = self.payer.inflight_htlcs();
let route = self.router.find_route(
&payer, ¶ms, Some(&first_hops.iter().collect::<Vec<_>>()), inflight_htlcs
}
match self.payer.retry_payment(&route.as_ref().unwrap(), payment_id) {
- Ok(()) => {
- for path in route.unwrap().paths.into_iter() {
- self.process_path_inflight_htlcs(payment_hash, path);
- }
- Ok(())
- },
+ Ok(()) => Ok(()),
Err(PaymentSendFailure::ParameterError(_)) |
Err(PaymentSendFailure::PathParameterError(_)) => {
log_trace!(self.logger, "Failed to retry for payment {} due to bogus route/payment data, not retrying.", log_bytes!(payment_hash.0));
Err(())
},
- Err(PaymentSendFailure::AllFailedRetrySafe(_)) => {
+ Err(PaymentSendFailure::AllFailedResendSafe(_)) => {
self.retry_payment(payment_id, payment_hash, params)
},
- Err(PaymentSendFailure::PartialFailure { failed_paths_retry, results, .. }) => {
- // If a `PartialFailure` error contains a result that is an `Ok()`, it means that
- // part of our payment is retried. When we receive `MonitorUpdateInProgress`, it
- // means that we are still waiting for our channel monitor update to complete.
- for (result, path) in results.iter().zip(route.unwrap().paths.into_iter()) {
- match result {
- Ok(_) | Err(APIError::MonitorUpdateInProgress) => {
- self.process_path_inflight_htlcs(payment_hash, path);
- },
- _ => {},
- }
- }
-
+ Err(PaymentSendFailure::DuplicatePayment) => {
+ log_error!(self.logger, "Got a DuplicatePayment error when attempting to retry a payment, this shouldn't happen.");
+ Err(())
+ }
+ Err(PaymentSendFailure::PartialFailure { failed_paths_retry, .. }) => {
if let Some(retry) = failed_paths_retry {
// Always return Ok for the same reason as noted in pay_internal.
let _ = self.retry_payment(payment_id, payment_hash, &retry);
pub fn remove_cached_payment(&self, payment_hash: &PaymentHash) {
self.payment_cache.lock().unwrap().remove(payment_hash);
}
-
- /// Use path information in the payment_cache to construct a HashMap mapping a channel's short
- /// channel id and direction to the amount being sent through it.
- ///
- /// This function should be called whenever we need information about currently used up liquidity
- /// across payments.
- fn create_inflight_map(&self) -> InFlightHtlcs {
- let mut total_inflight_map: HashMap<(u64, bool), u64> = HashMap::new();
- // Make an attempt at finding existing payment information from `payment_cache`. If it
- // does not exist, it probably is a fresh payment and we can just return an empty
- // HashMap.
- for payment_info in self.payment_cache.lock().unwrap().values() {
- for path in &payment_info.paths {
- if path.is_empty() { break };
- // total_inflight_map needs to be direction-sensitive when keeping track of the HTLC value
- // that is held up. However, the `hops` array, which is a path returned by `find_route` in
- // the router excludes the payer node. In the following lines, the payer's information is
- // hardcoded with an inflight value of 0 so that we can correctly represent the first hop
- // in our sliding window of two.
- let our_node_id: PublicKey = self.payer.node_id();
- let reversed_hops_with_payer = path.iter().rev().skip(1)
- .map(|hop| hop.pubkey)
- .chain(core::iter::once(our_node_id));
- let mut cumulative_msat = 0;
-
- // Taking the reversed vector from above, we zip it with just the reversed hops list to
- // work "backwards" of the given path, since the last hop's `fee_msat` actually represents
- // the total amount sent.
- for (next_hop, prev_hop) in path.iter().rev().zip(reversed_hops_with_payer) {
- cumulative_msat += next_hop.fee_msat;
- total_inflight_map
- .entry((next_hop.short_channel_id, NodeId::from_pubkey(&prev_hop) < NodeId::from_pubkey(&next_hop.pubkey)))
- .and_modify(|used_liquidity_msat| *used_liquidity_msat += cumulative_msat)
- .or_insert(cumulative_msat);
- }
- }
- }
-
- InFlightHtlcs::new(total_inflight_map)
- }
}
fn expiry_time_from_unix_epoch(invoice: &Invoice) -> Duration {
} else { false }
}
-impl<P: Deref, R: ScoringRouter, L: Deref, E: EventHandler, T: Time> EventHandler for InvoicePayerUsingTime<P, R, L, E, T>
+impl<P: Deref, R: ScoringRouter, L: Deref, E: sealed::BaseEventHandler, T: Time>
+ InvoicePayerUsingTime<P, R, L, E, T>
where
P::Target: Payer,
L::Target: Logger,
{
- fn handle_event(&self, event: &Event) {
- match event {
- Event::PaymentPathFailed { payment_hash, path, .. }
- | Event::PaymentPathSuccessful { path, payment_hash: Some(payment_hash), .. }
- | Event::ProbeSuccessful { payment_hash, path, .. }
- | Event::ProbeFailed { payment_hash, path, .. } => {
- self.remove_path_inflight_htlcs(*payment_hash, path);
- },
- _ => {},
- }
-
+ /// Returns a bool indicating whether the processed event should be forwarded to a user-provided
+ /// event handler.
+ fn handle_event_internal(&self, event: &Event) -> bool {
match event {
Event::PaymentPathFailed {
payment_id, payment_hash, payment_failed_permanently, path, short_channel_id, retry, ..
self.payer.abandon_payment(payment_id.unwrap());
} else if self.retry_payment(payment_id.unwrap(), *payment_hash, retry.as_ref().unwrap()).is_ok() {
// We retried at least somewhat, don't provide the PaymentPathFailed event to the user.
- return;
+ return false;
} else {
self.payer.abandon_payment(payment_id.unwrap());
}
let mut payment_cache = self.payment_cache.lock().unwrap();
let attempts = payment_cache
.remove(payment_hash)
- .map_or(1, |payment_info| payment_info.attempts.count + 1);
+ .map_or(1, |attempts| attempts.count + 1);
log_trace!(self.logger, "Payment {} succeeded (attempts: {})", log_bytes!(payment_hash.0), attempts);
},
Event::ProbeSuccessful { payment_hash, path, .. } => {
}
// Delegate to the decorated event handler unless the payment is retried.
- self.event_handler.handle_event(event)
+ true
+ }
+}
+
+impl<P: Deref, R: ScoringRouter, L: Deref, E: EventHandler, T: Time>
+	EventHandler for InvoicePayerUsingTime<P, R, L, E, T>
+where
+	P::Target: Payer,
+	L::Target: Logger,
+{
+	/// Intercepts the event for retry handling via `handle_event_internal` and only
+	/// forwards it to the user-provided (synchronous) event handler when the internal
+	/// handler indicates it should be surfaced (i.e. the payment was not retried).
+	fn handle_event(&self, event: Event) {
+		let should_forward = self.handle_event_internal(&event);
+		if should_forward {
+			self.event_handler.handle_event(event)
+		}
+	}
+}
+
+impl<P: Deref, R: ScoringRouter, L: Deref, T: Time, F: Future, H: Fn(Event) -> F>
+	InvoicePayerUsingTime<P, R, L, H, T>
+where
+	P::Target: Payer,
+	L::Target: Logger,
+{
+	/// Intercepts events required by the [`InvoicePayer`] and forwards them to the underlying event
+	/// handler, if necessary, to handle them asynchronously.
+	///
+	/// NOTE(review): unlike the synchronous [`EventHandler`] impl, here the handler `H` is a
+	/// closure returning a [`Future`] which is awaited before this method completes.
+	pub async fn handle_event_async(&self, event: Event) {
+		let should_forward = self.handle_event_internal(&event);
+		if should_forward {
+			(self.event_handler)(event).await;
+		}
}
}
use std::time::{SystemTime, Duration};
use crate::time_utils::tests::SinceEpoch;
use crate::DEFAULT_EXPIRY_TIME;
- use lightning::util::errors::APIError::{ChannelUnavailable, MonitorUpdateInProgress};
fn invoice(payment_preimage: PaymentPreimage) -> Invoice {
let payment_hash = Sha256::hash(&payment_preimage.0);
#[test]
fn pays_invoice_on_first_attempt() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
let payment_id = Some(invoice_payer.pay_invoice(&invoice).unwrap());
assert_eq!(*payer.attempts.borrow(), 1);
- invoice_payer.handle_event(&Event::PaymentSent {
+ invoice_payer.handle_event(Event::PaymentSent {
payment_id, payment_preimage, payment_hash, fee_paid_msat: None
});
assert_eq!(*event_handled.borrow(), true);
#[test]
fn pays_invoice_on_retry() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id: None,
retry: Some(TestRouter::retry_for_invoice(&invoice)),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
assert_eq!(*event_handled.borrow(), false);
assert_eq!(*payer.attempts.borrow(), 2);
- invoice_payer.handle_event(&Event::PaymentSent {
+ invoice_payer.handle_event(Event::PaymentSent {
payment_id, payment_preimage, payment_hash, fee_paid_msat: None
});
assert_eq!(*event_handled.borrow(), true);
#[test]
fn pays_invoice_on_partial_failure() {
- let event_handler = |_: &_| { panic!() };
+ let event_handler = |_: Event| { panic!() };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
#[test]
fn retries_payment_path_for_unknown_payment() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id: None,
retry: Some(TestRouter::retry_for_invoice(&invoice)),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event.clone());
assert_eq!(*event_handled.borrow(), false);
assert_eq!(*payer.attempts.borrow(), 1);
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event.clone());
assert_eq!(*event_handled.borrow(), false);
assert_eq!(*payer.attempts.borrow(), 2);
- invoice_payer.handle_event(&Event::PaymentSent {
+ invoice_payer.handle_event(Event::PaymentSent {
payment_id, payment_preimage, payment_hash, fee_paid_msat: None
});
assert_eq!(*event_handled.borrow(), true);
#[test]
fn fails_paying_invoice_after_max_retry_counts() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id: None,
retry: Some(TestRouter::retry_for_invoice(&invoice)),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
assert_eq!(*event_handled.borrow(), false);
assert_eq!(*payer.attempts.borrow(), 2);
final_value_msat: final_value_msat / 2, ..TestRouter::retry_for_invoice(&invoice)
}),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event.clone());
assert_eq!(*event_handled.borrow(), false);
assert_eq!(*payer.attempts.borrow(), 3);
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event.clone());
assert_eq!(*event_handled.borrow(), true);
assert_eq!(*payer.attempts.borrow(), 3);
}
#[test]
fn fails_paying_invoice_after_max_retry_timeout() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id: None,
retry: Some(TestRouter::retry_for_invoice(&invoice)),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event.clone());
assert_eq!(*event_handled.borrow(), false);
assert_eq!(*payer.attempts.borrow(), 2);
SinceEpoch::advance(Duration::from_secs(121));
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event.clone());
assert_eq!(*event_handled.borrow(), true);
assert_eq!(*payer.attempts.borrow(), 2);
}
#[test]
fn fails_paying_invoice_with_missing_retry_params() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id: None,
retry: None,
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
assert_eq!(*event_handled.borrow(), true);
assert_eq!(*payer.attempts.borrow(), 1);
}
#[test]
fn fails_paying_invoice_after_expiration() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payer = TestPayer::new();
let router = TestRouter::new(TestScorer::new());
#[test]
fn fails_retrying_invoice_after_expiration() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id: None,
retry: Some(retry_data),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
assert_eq!(*event_handled.borrow(), true);
assert_eq!(*payer.attempts.borrow(), 1);
}
#[test]
fn fails_paying_invoice_after_retry_error() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id: None,
retry: Some(TestRouter::retry_for_invoice(&invoice)),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
assert_eq!(*event_handled.borrow(), true);
assert_eq!(*payer.attempts.borrow(), 2);
}
#[test]
fn fails_paying_invoice_after_rejected_by_payee() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id: None,
retry: Some(TestRouter::retry_for_invoice(&invoice)),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
assert_eq!(*event_handled.borrow(), true);
assert_eq!(*payer.attempts.borrow(), 1);
}
#[test]
fn fails_repaying_invoice_with_pending_payment() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id: None,
retry: Some(TestRouter::retry_for_invoice(&invoice)),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
assert_eq!(*event_handled.borrow(), true);
}
let router = FailingRouter {};
let logger = TestLogger::new();
let invoice_payer =
- InvoicePayer::new(&payer, router, &logger, |_: &_| {}, Retry::Attempts(0));
+ InvoicePayer::new(&payer, router, &logger, |_: Event| {}, Retry::Attempts(0));
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
let router = TestRouter::new(TestScorer::new());
let logger = TestLogger::new();
let invoice_payer =
- InvoicePayer::new(&payer, router, &logger, |_: &_| {}, Retry::Attempts(0));
+ InvoicePayer::new(&payer, router, &logger, |_: Event| {}, Retry::Attempts(0));
match invoice_payer.pay_invoice(&invoice) {
Err(PaymentError::Sending(_)) => {},
#[test]
fn pays_zero_value_invoice_using_amount() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = zero_value_invoice(payment_preimage);
Some(invoice_payer.pay_zero_value_invoice(&invoice, final_value_msat).unwrap());
assert_eq!(*payer.attempts.borrow(), 1);
- invoice_payer.handle_event(&Event::PaymentSent {
+ invoice_payer.handle_event(Event::PaymentSent {
payment_id, payment_preimage, payment_hash, fee_paid_msat: None
});
assert_eq!(*event_handled.borrow(), true);
#[test]
fn fails_paying_zero_value_invoice_with_amount() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payer = TestPayer::new();
let router = TestRouter::new(TestScorer::new());
#[test]
fn pays_pubkey_with_amount() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let pubkey = pubkey();
let payment_preimage = PaymentPreimage([1; 32]);
short_channel_id: None,
retry: Some(retry),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
assert_eq!(*event_handled.borrow(), false);
assert_eq!(*payer.attempts.borrow(), 2);
- invoice_payer.handle_event(&Event::PaymentSent {
+ invoice_payer.handle_event(Event::PaymentSent {
payment_id, payment_preimage, payment_hash, fee_paid_msat: None
});
assert_eq!(*event_handled.borrow(), true);
#[test]
fn scores_failed_channel() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
short_channel_id,
retry: Some(TestRouter::retry_for_invoice(&invoice)),
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
}
#[test]
fn scores_successful_channels() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let invoice = invoice(payment_preimage);
let event = Event::PaymentPathSuccessful {
payment_id, payment_hash, path: route.paths[0].clone()
};
- invoice_payer.handle_event(&event);
+ invoice_payer.handle_event(event);
let event = Event::PaymentPathSuccessful {
payment_id, payment_hash, path: route.paths[1].clone()
};
- invoice_payer.handle_event(&event);
- }
-
- #[test]
- fn generates_correct_inflight_map_data() {
- let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
-
- let payment_preimage = PaymentPreimage([1; 32]);
- let invoice = invoice(payment_preimage);
- let payment_hash = Some(PaymentHash(invoice.payment_hash().clone().into_inner()));
- let final_value_msat = invoice.amount_milli_satoshis().unwrap();
-
- let payer = TestPayer::new().expect_send(Amount::ForInvoice(final_value_msat));
- let final_value_msat = invoice.amount_milli_satoshis().unwrap();
- let route = TestRouter::route_for_value(final_value_msat);
- let router = TestRouter::new(TestScorer::new());
- let logger = TestLogger::new();
- let invoice_payer =
- InvoicePayer::new(&payer, router, &logger, event_handler, Retry::Attempts(0));
-
- let payment_id = invoice_payer.pay_invoice(&invoice).unwrap();
-
- let inflight_map = invoice_payer.create_inflight_map();
- // First path check
- assert_eq!(inflight_map.0.get(&(0, false)).unwrap().clone(), 94);
- assert_eq!(inflight_map.0.get(&(1, true)).unwrap().clone(), 84);
- assert_eq!(inflight_map.0.get(&(2, false)).unwrap().clone(), 64);
-
- // Second path check
- assert_eq!(inflight_map.0.get(&(3, false)).unwrap().clone(), 74);
- assert_eq!(inflight_map.0.get(&(4, false)).unwrap().clone(), 64);
-
- invoice_payer.handle_event(&Event::PaymentPathSuccessful {
- payment_id, payment_hash, path: route.paths[0].clone()
- });
-
- let inflight_map = invoice_payer.create_inflight_map();
-
- assert_eq!(inflight_map.0.get(&(0, false)), None);
- assert_eq!(inflight_map.0.get(&(1, true)), None);
- assert_eq!(inflight_map.0.get(&(2, false)), None);
-
- // Second path should still be inflight
- assert_eq!(inflight_map.0.get(&(3, false)).unwrap().clone(), 74);
- assert_eq!(inflight_map.0.get(&(4, false)).unwrap().clone(), 64)
+ invoice_payer.handle_event(event);
}
#[test]
- fn considers_inflight_htlcs_between_invoice_payments_when_path_succeeds() {
- // First, let's just send a payment through, but only make sure one of the path completes
+ fn considers_inflight_htlcs_between_invoice_payments() {
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let payment_invoice = invoice(payment_preimage);
- let payment_hash = Some(PaymentHash(payment_invoice.payment_hash().clone().into_inner()));
let final_value_msat = payment_invoice.amount_milli_satoshis().unwrap();
let payer = TestPayer::new()
.expect_send(Amount::ForInvoice(final_value_msat))
.expect_send(Amount::ForInvoice(final_value_msat));
- let final_value_msat = payment_invoice.amount_milli_satoshis().unwrap();
- let route = TestRouter::route_for_value(final_value_msat);
let scorer = TestScorer::new()
// 1st invoice, 1st path
.expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
.expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
.expect_usage(ChannelUsage { amount_msat: 74, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
// 2nd invoice, 1st path
- .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
- .expect_usage(ChannelUsage { amount_msat: 84, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
- .expect_usage(ChannelUsage { amount_msat: 94, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 64, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 84, inflight_htlc_msat: 84, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 94, inflight_htlc_msat: 94, effective_capacity: EffectiveCapacity::Unknown } )
// 2nd invoice, 2nd path
.expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 64, effective_capacity: EffectiveCapacity::Unknown } )
.expect_usage(ChannelUsage { amount_msat: 74, inflight_htlc_msat: 74, effective_capacity: EffectiveCapacity::Unknown } );
let invoice_payer =
InvoicePayer::new(&payer, router, &logger, event_handler, Retry::Attempts(0));
- // Succeed 1st path, leave 2nd path inflight
- let payment_id = invoice_payer.pay_invoice(&payment_invoice).unwrap();
- invoice_payer.handle_event(&Event::PaymentPathSuccessful {
- payment_id, payment_hash, path: route.paths[0].clone()
- });
+ // Make first invoice payment.
+ invoice_payer.pay_invoice(&payment_invoice).unwrap();
// Let's pay a second invoice that will be using the same path. This should trigger the
- // assertions that expect the last 4 ChannelUsage values above where TestScorer is initialized.
- // Particularly, the 2nd path of the 1st payment, since it is not yet complete, should still
- // have 64 msats inflight for paths considering the channel with scid of 1.
+ // assertions that expect `ChannelUsage` values of the first invoice payment that is still
+ // in-flight.
let payment_preimage_2 = PaymentPreimage([2; 32]);
let payment_invoice_2 = invoice(payment_preimage_2);
invoice_payer.pay_invoice(&payment_invoice_2).unwrap();
fn considers_inflight_htlcs_between_retries() {
// First, let's just send a payment through, but only make sure one of the path completes
let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+ let event_handler = |_: Event| { *event_handled.borrow_mut() = true; };
let payment_preimage = PaymentPreimage([1; 32]);
let payment_invoice = invoice(payment_preimage);
// Fail 1st path, leave 2nd path inflight
let payment_id = Some(invoice_payer.pay_invoice(&payment_invoice).unwrap());
- invoice_payer.handle_event(&Event::PaymentPathFailed {
+ invoice_payer.payer.fail_path(&TestRouter::path_for_value(final_value_msat));
+ invoice_payer.handle_event(Event::PaymentPathFailed {
payment_id,
payment_hash,
network_update: None,
});
// Fails again the 1st path of our retry
- invoice_payer.handle_event(&Event::PaymentPathFailed {
+ invoice_payer.payer.fail_path(&TestRouter::path_for_value(final_value_msat / 2));
+ invoice_payer.handle_event(Event::PaymentPathFailed {
payment_id,
payment_hash,
network_update: None,
});
}
- #[test]
- fn accounts_for_some_inflight_htlcs_sent_during_partial_failure() {
- let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
-
- let payment_preimage = PaymentPreimage([1; 32]);
- let invoice_to_pay = invoice(payment_preimage);
- let final_value_msat = invoice_to_pay.amount_milli_satoshis().unwrap();
-
- let retry = TestRouter::retry_for_invoice(&invoice_to_pay);
- let payer = TestPayer::new()
- .fails_with_partial_failure(
- retry.clone(), OnAttempt(1),
- Some(vec![
- Err(ChannelUnavailable { err: "abc".to_string() }), Err(MonitorUpdateInProgress)
- ]))
- .expect_send(Amount::ForInvoice(final_value_msat));
-
- let router = TestRouter::new(TestScorer::new());
- let logger = TestLogger::new();
- let invoice_payer =
- InvoicePayer::new(&payer, router, &logger, event_handler, Retry::Attempts(0));
-
- invoice_payer.pay_invoice(&invoice_to_pay).unwrap();
- let inflight_map = invoice_payer.create_inflight_map();
-
- // Only the second path, which failed with `MonitorUpdateInProgress` should be added to our
- // inflight map because retries are disabled.
- assert_eq!(inflight_map.0.len(), 2);
- }
-
- #[test]
- fn accounts_for_all_inflight_htlcs_sent_during_partial_failure() {
- let event_handled = core::cell::RefCell::new(false);
- let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
-
- let payment_preimage = PaymentPreimage([1; 32]);
- let invoice_to_pay = invoice(payment_preimage);
- let final_value_msat = invoice_to_pay.amount_milli_satoshis().unwrap();
-
- let retry = TestRouter::retry_for_invoice(&invoice_to_pay);
- let payer = TestPayer::new()
- .fails_with_partial_failure(
- retry.clone(), OnAttempt(1),
- Some(vec![
- Ok(()), Err(MonitorUpdateInProgress)
- ]))
- .expect_send(Amount::ForInvoice(final_value_msat));
-
- let router = TestRouter::new(TestScorer::new());
- let logger = TestLogger::new();
- let invoice_payer =
- InvoicePayer::new(&payer, router, &logger, event_handler, Retry::Attempts(0));
-
- invoice_payer.pay_invoice(&invoice_to_pay).unwrap();
- let inflight_map = invoice_payer.create_inflight_map();
-
- // All paths successful, hence we check of the existence of all 5 hops.
- assert_eq!(inflight_map.0.len(), 5);
- }
-
struct TestRouter {
scorer: RefCell<TestScorer>,
}
expectations: core::cell::RefCell<VecDeque<Amount>>,
attempts: core::cell::RefCell<usize>,
failing_on_attempt: core::cell::RefCell<HashMap<usize, PaymentSendFailure>>,
+ inflight_htlcs_paths: core::cell::RefCell<Vec<Vec<RouteHop>>>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
expectations: core::cell::RefCell::new(VecDeque::new()),
attempts: core::cell::RefCell::new(0),
failing_on_attempt: core::cell::RefCell::new(HashMap::new()),
+ inflight_htlcs_paths: core::cell::RefCell::new(Vec::new()),
}
}
panic!("Unexpected amount: {:?}", actual_value_msats);
}
}
+
+	/// Records every path of the given route as pending, so that `inflight_htlcs` can
+	/// later report the liquidity used by HTLCs this test payer has "sent".
+	fn track_inflight_htlcs(&self, route: &Route) {
+		for path in &route.paths {
+			self.inflight_htlcs_paths.borrow_mut().push(path.clone());
+		}
+	}
+
+	/// Removes the given path from the tracked in-flight set, simulating an HTLC that has
+	/// resolved (failed back). No-op when the path was never tracked.
+	fn fail_path(&self, path: &Vec<RouteHop>) {
+		let path_idx = self.inflight_htlcs_paths.borrow().iter().position(|p| p == path);
+
+		if let Some(idx) = path_idx {
+			// Order of tracked paths is irrelevant, so the O(1) swap_remove suffices.
+			self.inflight_htlcs_paths.borrow_mut().swap_remove(idx);
+		}
+	}
}
impl Drop for TestPayer {
_payment_secret: &Option<PaymentSecret>, _payment_id: PaymentId,
) -> Result<(), PaymentSendFailure> {
self.check_value_msats(Amount::ForInvoice(route.get_total_amount()));
+ self.track_inflight_htlcs(route);
self.check_attempts()
}
&self, route: &Route, _payment_id: PaymentId
) -> Result<(), PaymentSendFailure> {
self.check_value_msats(Amount::OnRetry(route.get_total_amount()));
+ self.track_inflight_htlcs(route);
self.check_attempts()
}
fn abandon_payment(&self, _payment_id: PaymentId) { }
+
+	/// Builds an [`InFlightHtlcs`] snapshot from all currently tracked paths by processing
+	/// each path from this payer's node id.
+	fn inflight_htlcs(&self) -> InFlightHtlcs {
+		let mut inflight_htlcs = InFlightHtlcs::new();
+		// Clone the RefCell so we iterate an owned copy rather than holding the borrow.
+		for path in self.inflight_htlcs_paths.clone().into_inner() {
+			inflight_htlcs.process_path(&path, self.node_id());
+		}
+		inflight_htlcs
+	}
}
// *** Full Featured Functional Tests with a Real ChannelManager ***
route.paths[1][0].fee_msat = 50_000_000;
router.expect_find_route(Ok(route.clone()));
- let event_handler = |_: &_| { panic!(); };
+ let event_handler = |_: Event| { panic!(); };
let invoice_payer = InvoicePayer::new(nodes[0].node, router, nodes[0].logger, event_handler, Retry::Attempts(1));
assert!(invoice_payer.pay_invoice(&create_invoice_from_channelmanager_and_duration_since_epoch(
route.paths[1][0].fee_msat = 50_000_001;
router.expect_find_route(Ok(route.clone()));
- let event_handler = |_: &_| { panic!(); };
+ let event_handler = |_: Event| { panic!(); };
let invoice_payer = InvoicePayer::new(nodes[0].node, router, nodes[0].logger, event_handler, Retry::Attempts(1));
assert!(invoice_payer.pay_invoice(&create_invoice_from_channelmanager_and_duration_since_epoch(
route.paths.remove(1);
router.expect_find_route(Ok(route.clone()));
- let expected_events: RefCell<VecDeque<&dyn Fn(&Event)>> = RefCell::new(VecDeque::new());
- let event_handler = |event: &Event| {
+ let expected_events: RefCell<VecDeque<&dyn Fn(Event)>> = RefCell::new(VecDeque::new());
+ let event_handler = |event: Event| {
let event_checker = expected_events.borrow_mut().pop_front().unwrap();
event_checker(event);
};
// `PaymentPathFailed` being passed up to the user (us, in this case). Previously, we'd
// treated this as "HTLC complete" and dropped the retry counter, causing us to retry again
// if the final HTLC failed.
- expected_events.borrow_mut().push_back(&|ev: &Event| {
+ expected_events.borrow_mut().push_back(&|ev: Event| {
if let Event::PaymentPathFailed { payment_failed_permanently, all_paths_failed, .. } = ev {
assert!(!payment_failed_permanently);
assert!(all_paths_failed);
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true);
- expected_events.borrow_mut().push_back(&|ev: &Event| {
+ expected_events.borrow_mut().push_back(&|ev: Event| {
if let Event::PaymentPathFailed { payment_failed_permanently, all_paths_failed, .. } = ev {
assert!(!payment_failed_permanently);
assert!(all_paths_failed);
} else { panic!("Unexpected event"); }
});
- expected_events.borrow_mut().push_back(&|ev: &Event| {
+ expected_events.borrow_mut().push_back(&|ev: Event| {
if let Event::PaymentFailed { .. } = ev {
} else { panic!("Unexpected event"); }
});
use bitcoin_hashes::{Hash, sha256};
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use lightning::chain::keysinterface::{Recipient, KeysInterface, Sign};
+use lightning::chain::keysinterface::{Recipient, KeysInterface};
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChannelDetails, ChannelManager, PaymentId, PaymentSendFailure, MIN_FINAL_CLTV_EXPIRY};
#[cfg(feature = "std")]
/// [`ChannelManager::create_inbound_payment`]: lightning::ln::channelmanager::ChannelManager::create_inbound_payment
/// [`ChannelManager::create_inbound_payment_for_hash`]: lightning::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash
/// [`PhantomRouteHints::channels`]: lightning::ln::channelmanager::PhantomRouteHints::channels
-pub fn create_phantom_invoice<Signer: Sign, K: Deref, L: Deref>(
+pub fn create_phantom_invoice<K: Deref, L: Deref>(
amt_msat: Option<u64>, payment_hash: Option<PaymentHash>, description: String,
invoice_expiry_delta_secs: u32, phantom_route_hints: Vec<PhantomRouteHints>, keys_manager: K,
logger: L, network: Currency,
{
let description = Description::new(description).map_err(SignOrCreationError::CreationError)?;
let description = InvoiceDescription::Direct(&description,);
- _create_phantom_invoice::<Signer, K, L>(
+ _create_phantom_invoice::<K, L>(
amt_msat, payment_hash, description, invoice_expiry_delta_secs, phantom_route_hints,
keys_manager, logger, network,
)
/// [`ChannelManager::create_inbound_payment`]: lightning::ln::channelmanager::ChannelManager::create_inbound_payment
/// [`ChannelManager::create_inbound_payment_for_hash`]: lightning::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash
/// [`PhantomRouteHints::channels`]: lightning::ln::channelmanager::PhantomRouteHints::channels
-pub fn create_phantom_invoice_with_description_hash<Signer: Sign, K: Deref, L: Deref>(
+pub fn create_phantom_invoice_with_description_hash<K: Deref, L: Deref>(
amt_msat: Option<u64>, payment_hash: Option<PaymentHash>, invoice_expiry_delta_secs: u32,
description_hash: Sha256, phantom_route_hints: Vec<PhantomRouteHints>, keys_manager: K,
logger: L, network: Currency
K::Target: KeysInterface,
L::Target: Logger,
{
- _create_phantom_invoice::<Signer, K, L>(
+ _create_phantom_invoice::<K, L>(
amt_msat, payment_hash, InvoiceDescription::Hash(&description_hash),
invoice_expiry_delta_secs, phantom_route_hints, keys_manager, logger, network,
)
}
#[cfg(feature = "std")]
-fn _create_phantom_invoice<Signer: Sign, K: Deref, L: Deref>(
+fn _create_phantom_invoice<K: Deref, L: Deref>(
amt_msat: Option<u64>, payment_hash: Option<PaymentHash>, description: InvoiceDescription,
invoice_expiry_delta_secs: u32, phantom_route_hints: Vec<PhantomRouteHints>, keys_manager: K,
logger: L, network: Currency,
fn abandon_payment(&self, payment_id: PaymentId) {
self.abandon_payment(payment_id)
}
+
+ fn inflight_htlcs(&self) -> InFlightHtlcs { self.compute_inflight_htlcs() }
}
use lightning::ln::functional_test_utils::*;
use lightning::ln::msgs::ChannelMessageHandler;
use lightning::routing::router::{PaymentParameters, RouteParameters, find_route};
- use lightning::util::enforcing_trait_impls::EnforcingSigner;
use lightning::util::events::{MessageSendEvent, MessageSendEventsProvider, Event};
use lightning::util::test_utils;
use lightning::util::config::UserConfig;
let non_default_invoice_expiry_secs = 4200;
let invoice =
- crate::utils::create_phantom_invoice::<EnforcingSigner, &test_utils::TestKeysInterface, &test_utils::TestLogger>(
+ crate::utils::create_phantom_invoice::<&test_utils::TestKeysInterface, &test_utils::TestLogger>(
Some(payment_amt), payment_hash, "test".to_string(), non_default_invoice_expiry_secs,
route_hints, &nodes[1].keys_manager, &nodes[1].logger, Currency::BitcoinTestnet
).unwrap();
nodes[fwd_idx].node.process_pending_htlc_forwards();
let payment_preimage_opt = if user_generated_pmt_hash { None } else { Some(payment_preimage) };
- expect_payment_received!(&nodes[fwd_idx], payment_hash, payment_secret, payment_amt, payment_preimage_opt);
+ expect_payment_received!(&nodes[fwd_idx], payment_hash, payment_secret, payment_amt, payment_preimage_opt, route.paths[0].last().unwrap().pubkey);
do_claim_payment_along_route(&nodes[0], &vec!(&vec!(&nodes[fwd_idx])[..]), false, payment_preimage);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
nodes[2].node.get_phantom_route_hints(),
];
- let invoice = crate::utils::create_phantom_invoice::<EnforcingSigner, &test_utils::TestKeysInterface, &test_utils::TestLogger>(Some(payment_amt), Some(payment_hash), "test".to_string(), 3600, route_hints, &nodes[1].keys_manager, &nodes[1].logger, Currency::BitcoinTestnet).unwrap();
+ let invoice = crate::utils::create_phantom_invoice::<&test_utils::TestKeysInterface, &test_utils::TestLogger>(Some(payment_amt), Some(payment_hash), "test".to_string(), 3600, route_hints, &nodes[1].keys_manager, &nodes[1].logger, Currency::BitcoinTestnet).unwrap();
let chan_0_1 = &nodes[1].node.list_usable_channels()[0];
assert_eq!(invoice.route_hints()[0].0[0].htlc_minimum_msat, chan_0_1.inbound_htlc_minimum_msat);
let description_hash = crate::Sha256(Hash::hash("Description hash phantom invoice".as_bytes()));
let non_default_invoice_expiry_secs = 4200;
let invoice = crate::utils::create_phantom_invoice_with_description_hash::<
- EnforcingSigner, &test_utils::TestKeysInterface, &test_utils::TestLogger,
+ &test_utils::TestKeysInterface, &test_utils::TestLogger,
>(
Some(payment_amt), None, non_default_invoice_expiry_secs, description_hash,
route_hints, &nodes[1].keys_manager, &nodes[1].logger, Currency::BitcoinTestnet
.map(|route_hint| route_hint.phantom_scid)
.collect::<HashSet<u64>>();
- let invoice = crate::utils::create_phantom_invoice::<EnforcingSigner, &test_utils::TestKeysInterface, &test_utils::TestLogger>(invoice_amt, None, "test".to_string(), 3600, phantom_route_hints, &invoice_node.keys_manager, &invoice_node.logger, Currency::BitcoinTestnet).unwrap();
+ let invoice = crate::utils::create_phantom_invoice::<&test_utils::TestKeysInterface, &test_utils::TestLogger>(invoice_amt, None, "test".to_string(), 3600, phantom_route_hints, &invoice_node.keys_manager, &invoice_node.logger, Currency::BitcoinTestnet).unwrap();
let invoice_hints = invoice.private_routes();
//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
//! lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await;
//! loop {
-//! let event_handler = |event: &Event| {
+//! let event_handler = |event: Event| {
//! // Handle the event!
//! };
//! channel_manager.await_persistable_update();
//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
//! lightning_net_tokio::setup_inbound(peer_manager, socket);
//! loop {
-//! let event_handler = |event: &Event| {
+//! let event_handler = |event: Event| {
//! // Handle the event!
//! };
//! channel_manager.await_persistable_update();
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::hashes::hex::FromHex;
use lightning::chain::channelmonitor::ChannelMonitor;
-use lightning::chain::keysinterface::{Sign, KeysInterface};
+use lightning::chain::keysinterface::KeysInterface;
use lightning::util::ser::{ReadableArgs, Writeable};
use lightning::util::persist::KVStorePersister;
use std::fs;
}
/// Read `ChannelMonitor`s from disk.
- pub fn read_channelmonitors<Signer: Sign, K: Deref> (
+ pub fn read_channelmonitors<K: Deref> (
&self, keys_manager: K
- ) -> Result<Vec<(BlockHash, ChannelMonitor<Signer>)>, std::io::Error>
- where K::Target: KeysInterface<Signer=Signer> + Sized,
+ ) -> Result<Vec<(BlockHash, ChannelMonitor<<K::Target as KeysInterface>::Signer>)>, std::io::Error>
+ where K::Target: KeysInterface + Sized,
{
let mut path = PathBuf::from(&self.path_to_channel_data);
path.push("monitors");
let contents = fs::read(&file.path())?;
let mut buffer = Cursor::new(&contents);
- match <(BlockHash, ChannelMonitor<Signer>)>::read(&mut buffer, &*keys_manager) {
+ match <(BlockHash, ChannelMonitor<<K::Target as KeysInterface>::Signer>)>::read(&mut buffer, &*keys_manager) {
Ok((blockhash, channel_monitor)) => {
if channel_monitor.get_funding_txo().0.txid != txid.unwrap() || channel_monitor.get_funding_txo().0.index != index.unwrap() {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "ChannelMonitor was stored in the wrong file"));
let mut prefix = [0u8; 4];
read_cursor.read_exact(&mut prefix)?;
- match prefix {
- GOSSIP_PREFIX => {}
- _ => {
- return Err(DecodeError::UnknownVersion.into());
- }
- };
+ if prefix != GOSSIP_PREFIX {
+ return Err(DecodeError::UnknownVersion.into());
+ }
let chain_hash: BlockHash = Readable::read(read_cursor)?;
let latest_seen_timestamp: u32 = Readable::read(read_cursor)?;
let node_id_1_index: BigSize = Readable::read(read_cursor)?;
let node_id_2_index: BigSize = Readable::read(read_cursor)?;
+
if max(node_id_1_index.0, node_id_2_index.0) >= node_id_count as u64 {
return Err(DecodeError::InvalidValue.into());
};
// flags are always sent in full, and hence always need updating
let standard_channel_flags = channel_flags & 0b_0000_0011;
- let mut synthetic_update = if channel_flags & 0b_1000_0000 == 0 {
- // full update, field flags will indicate deviations from the default
- UnsignedChannelUpdate {
- chain_hash,
- short_channel_id,
- timestamp: backdated_timestamp,
- flags: standard_channel_flags,
- cltv_expiry_delta: default_cltv_expiry_delta,
- htlc_minimum_msat: default_htlc_minimum_msat,
- htlc_maximum_msat: default_htlc_maximum_msat,
- fee_base_msat: default_fee_base_msat,
- fee_proportional_millionths: default_fee_proportional_millionths,
- excess_data: Vec::new(),
- }
- } else {
+ let mut synthetic_update = UnsignedChannelUpdate {
+ chain_hash,
+ short_channel_id,
+ timestamp: backdated_timestamp,
+ flags: standard_channel_flags,
+ cltv_expiry_delta: default_cltv_expiry_delta,
+ htlc_minimum_msat: default_htlc_minimum_msat,
+ htlc_maximum_msat: default_htlc_maximum_msat,
+ fee_base_msat: default_fee_base_msat,
+ fee_proportional_millionths: default_fee_proportional_millionths,
+ excess_data: Vec::new(),
+ };
+
+ let mut skip_update_for_unknown_channel = false;
+
+ if (channel_flags & 0b_1000_0000) != 0 {
// incremental update, field flags will indicate mutated values
let read_only_network_graph = network_graph.read_only();
- let channel = read_only_network_graph
+ if let Some(channel) = read_only_network_graph
.channels()
- .get(&short_channel_id)
- .ok_or(LightningError {
- err: "Couldn't find channel for update".to_owned(),
- action: ErrorAction::IgnoreError,
- })?;
-
- let directional_info = channel
- .get_directional_info(channel_flags)
- .ok_or(LightningError {
- err: "Couldn't find previous directional data for update".to_owned(),
- action: ErrorAction::IgnoreError,
- })?;
-
- UnsignedChannelUpdate {
- chain_hash,
- short_channel_id,
- timestamp: backdated_timestamp,
- flags: standard_channel_flags,
- cltv_expiry_delta: directional_info.cltv_expiry_delta,
- htlc_minimum_msat: directional_info.htlc_minimum_msat,
- htlc_maximum_msat: directional_info.htlc_maximum_msat,
- fee_base_msat: directional_info.fees.base_msat,
- fee_proportional_millionths: directional_info.fees.proportional_millionths,
- excess_data: Vec::new(),
+ .get(&short_channel_id) {
+
+ let directional_info = channel
+ .get_directional_info(channel_flags)
+ .ok_or(LightningError {
+ err: "Couldn't find previous directional data for update".to_owned(),
+ action: ErrorAction::IgnoreError,
+ })?;
+
+ synthetic_update.cltv_expiry_delta = directional_info.cltv_expiry_delta;
+ synthetic_update.htlc_minimum_msat = directional_info.htlc_minimum_msat;
+ synthetic_update.htlc_maximum_msat = directional_info.htlc_maximum_msat;
+ synthetic_update.fee_base_msat = directional_info.fees.base_msat;
+ synthetic_update.fee_proportional_millionths = directional_info.fees.proportional_millionths;
+
+ } else {
+ skip_update_for_unknown_channel = true;
}
};
synthetic_update.htlc_maximum_msat = htlc_maximum_msat;
}
+ if skip_update_for_unknown_channel {
+ continue;
+ }
+
match network_graph.update_channel_unsigned(&synthetic_update) {
Ok(_) => {},
Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {},
}
#[test]
- fn incremental_only_update_fails_without_prior_announcements() {
+ fn incremental_only_update_ignores_missing_channel() {
let incremental_update_input = vec![
76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 229, 183, 167,
let rapid_sync = RapidGossipSync::new(&network_graph);
let update_result = rapid_sync.update_network_graph(&incremental_update_input[..]);
- assert!(update_result.is_err());
- if let Err(GraphSyncError::LightningError(lightning_error)) = update_result {
- assert_eq!(lightning_error.err, "Couldn't find channel for update");
- } else {
- panic!("Unexpected update result: {:?}", update_result)
- }
+ assert!(update_result.is_ok());
}
#[test]
assert!(after.contains("619737530008010752"));
assert!(after.contains("783241506229452801"));
}
+
+ #[test]
+ pub fn update_fails_with_unknown_version() {
+ let unknown_version_input = vec![
+ 76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+ 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+ 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+ 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+ 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+ 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+ 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+ 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+ 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+ 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+ 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+ 0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192, 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0,
+ 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224, 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0,
+ 0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116, 216, 255, 2, 68, 226, 0, 6, 11, 0, 1,
+ 0, 0, 1,
+ ];
+
+ let block_hash = genesis_block(Network::Bitcoin).block_hash();
+ let logger = TestLogger::new();
+ let network_graph = NetworkGraph::new(block_hash, &logger);
+ let rapid_sync = RapidGossipSync::new(&network_graph);
+ let update_result = rapid_sync.update_network_graph(&unknown_version_input[..]);
+
+ assert!(update_result.is_err());
+
+ if let Err(GraphSyncError::DecodeError(DecodeError::UnknownVersion)) = update_result {
+ // this is the expected error type
+ } else {
+ panic!("Unexpected update result: {:?}", update_result)
+ }
+ }
}
//! servicing [`ChannelMonitor`] updates from the client.
use bitcoin::blockdata::block::BlockHeader;
-use bitcoin::hash_types::Txid;
+use bitcoin::hash_types::{Txid, BlockHash};
use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
use crate::util::logger::Logger;
use crate::util::errors::APIError;
use crate::util::events;
-use crate::util::events::EventHandler;
+use crate::util::events::{Event, EventHandler};
use crate::ln::channelmanager::ChannelDetails;
use crate::prelude::*;
self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
}
+ #[cfg(not(c_bindings))]
+ /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
+ pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<MonitorUpdateId>> {
+ self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
+ (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
+ }).collect()
+ }
+
+ #[cfg(c_bindings)]
+ /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
+ pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<MonitorUpdateId>)> {
+ self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
+ (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
+ }).collect()
+ }
+
+
#[cfg(test)]
pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
use crate::util::events::EventsProvider;
let events = core::cell::RefCell::new(Vec::new());
- let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
+ let event_handler = |event: events::Event| events.borrow_mut().push(event);
self.process_pending_events(&event_handler);
events.into_inner()
}
+
+ /// Processes any events asynchronously in the order they were generated since the last call
+ /// using the given event handler.
+ ///
+ /// See the trait-level documentation of [`EventsProvider`] for requirements.
+ ///
+ /// [`EventsProvider`]: crate::util::events::EventsProvider
+ pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
+ &self, handler: H
+ ) {
+ let mut pending_events = Vec::new();
+ for monitor_state in self.monitors.read().unwrap().values() {
+ pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
+ }
+ for event in pending_events {
+ handler(event).await;
+ }
+ }
}
impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
});
}
- fn get_relevant_txids(&self) -> Vec<Txid> {
+ fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
let mut txids = Vec::new();
let monitor_states = self.monitors.read().unwrap();
for monitor_state in monitor_states.values() {
for monitor_state in self.monitors.read().unwrap().values() {
pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
}
- for event in pending_events.drain(..) {
- handler.handle_event(&event);
+ for event in pending_events {
+ handler.handle_event(event);
}
}
#[cfg(anchors)]
for monitor_state in self.monitors.read().unwrap().values() {
pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
}
- for event in pending_events.drain(..) {
- handler.handle_event(&event);
+ for event in pending_events {
+ handler.handle_event(event);
}
}
}
	// Note that updates is a HashMap so the ordering here is actually random. This shouldn't
	// fail either way, but if it fails intermittently it likely depends on the ordering of updates.
let mut update_iter = updates.iter();
- nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
+ let next_update = update_iter.next().unwrap().clone();
+ // Should contain next_update when pending updates listed.
+ #[cfg(not(c_bindings))]
+ assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
+ .unwrap().contains(&next_update));
+ #[cfg(c_bindings)]
+ assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
+ .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
+ nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap();
+ // Should not contain the previously pending next_update when pending updates listed.
+ #[cfg(not(c_bindings))]
+ assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
+ .unwrap().contains(&next_update));
+ #[cfg(c_bindings)]
+ assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
+ .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
}
}
-/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
-/// transaction causing it.
+/// An entry for an [`OnchainEvent`], stating the block height and hash when the event was
+/// observed, as well as the transaction causing it.
///
/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
#[derive(PartialEq, Eq)]
struct OnchainEventEntry {
txid: Txid,
height: u32,
+ block_hash: Option<BlockHash>, // Added as optional, will be filled in for any entry generated on 0.0.113 or after
event: OnchainEvent,
transaction: Option<Transaction>, // Added as optional, but always filled in, in LDK 0.0.110
}
(0, self.txid, required),
(1, self.transaction, option),
(2, self.height, required),
+ (3, self.block_hash, option),
(4, self.event, required),
});
Ok(())
fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
let mut txid = Txid::all_zeros();
let mut transaction = None;
+ let mut block_hash = None;
let mut height = 0;
let mut event = None;
read_tlv_fields!(reader, {
(0, txid, required),
(1, transaction, option),
(2, height, required),
+ (3, block_hash, option),
(4, event, ignorable),
});
if let Some(ev) = event {
- Ok(Some(Self { txid, transaction, height, event: ev }))
+ Ok(Some(Self { txid, transaction, height, block_hash, event: ev }))
} else {
Ok(None)
}
/// spending CSV for revocable outputs).
htlcs_resolved_on_chain: Vec<IrrevocablyResolvedHTLC>,
+ /// The set of `SpendableOutput` events which we have already passed upstream to be claimed.
+ /// These are tracked explicitly to ensure that we don't generate the same events redundantly
+ /// if users duplicatively confirm old transactions. Specifically for transactions claiming a
+ /// revoked remote outpoint we otherwise have no tracking at all once they've reached
+ /// [`ANTI_REORG_DELAY`], so we have to track them here.
+ spendable_txids_confirmed: Vec<Txid>,
+
// We simply modify best_block in Channel's block_connected so that serialization is
// consistent but hopefully the users' copy handles block_connected in a consistent way.
// (we do *not*, however, update them in update_monitor to ensure any local user copies keep
(7, self.funding_spend_seen, required),
(9, self.counterparty_node_id, option),
(11, self.confirmed_commitment_tx_counterparty_output, option),
+ (13, self.spendable_txids_confirmed, vec_type),
});
Ok(())
funding_spend_confirmed: None,
confirmed_commitment_tx_counterparty_output: None,
htlcs_resolved_on_chain: Vec::new(),
+ spendable_txids_confirmed: Vec::new(),
best_block,
counterparty_node_id: Some(counterparty_node_id),
/// Gets the list of pending events which were generated by previous actions, clearing the list
/// in the process.
///
- /// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to
- /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
- /// no internal locking in ChannelMonitors.
+ /// This is called by the [`EventsProvider::process_pending_events`] implementation for
+ /// [`ChainMonitor`].
+ ///
+ /// [`EventsProvider::process_pending_events`]: crate::util::events::EventsProvider::process_pending_events
+ /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pub fn get_and_clear_pending_events(&self) -> Vec<Event> {
self.inner.lock().unwrap().get_and_clear_pending_events()
}
}
/// Returns the set of txids that should be monitored for re-organization out of the chain.
- pub fn get_relevant_txids(&self) -> Vec<Txid> {
+ pub fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
let inner = self.inner.lock().unwrap();
- let mut txids: Vec<Txid> = inner.onchain_events_awaiting_threshold_conf
+ let mut txids: Vec<(Txid, Option<BlockHash>)> = inner.onchain_events_awaiting_threshold_conf
.iter()
- .map(|entry| entry.txid)
+ .map(|entry| (entry.txid, entry.block_hash))
.chain(inner.onchain_tx_handler.get_relevant_txids().into_iter())
.collect();
txids.sort_unstable();
/// been revoked yet, the previous one, so we will never "forget" to resolve an HTLC.
macro_rules! fail_unbroadcast_htlcs {
($self: expr, $commitment_tx_type: expr, $commitment_txid_confirmed: expr, $commitment_tx_confirmed: expr,
- $commitment_tx_conf_height: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
+ $commitment_tx_conf_height: expr, $commitment_tx_conf_hash: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
debug_assert_eq!($commitment_tx_confirmed.txid(), $commitment_txid_confirmed);
macro_rules! check_htlc_fails {
txid: $commitment_txid_confirmed,
transaction: Some($commitment_tx_confirmed.clone()),
height: $commitment_tx_conf_height,
+ block_hash: Some(*$commitment_tx_conf_hash),
event: OnchainEvent::HTLCUpdate {
source: (**source).clone(),
payment_hash: htlc.payment_hash.clone(),
macro_rules! claim_htlcs {
($commitment_number: expr, $txid: expr) => {
let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None);
- self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
}
}
if let Some(txid) = self.current_counterparty_commitment_txid {
// block. Even if not, its a reasonable metric for the bump criteria on the HTLC
// transactions.
let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
- self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
if let Some(ref tx) = self.prev_holder_signed_commitment_tx {
let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx, self.best_block.height());
- self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
}
}
}
PackageSolvingData::HolderFundingOutput(funding_output),
best_block_height, false, best_block_height,
);
- self.onchain_tx_handler.update_claims_view(
- &[], vec![commitment_package], best_block_height, best_block_height,
+ self.onchain_tx_handler.update_claims_view_from_requests(
+ vec![commitment_package], best_block_height, best_block_height,
broadcaster, &bounded_fee_estimator, logger,
);
}
/// Returns packages to claim the revoked output(s), as well as additional outputs to watch and
/// general information about the output that is to the counterparty in the commitment
/// transaction.
- fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L)
+ fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L)
-> (Vec<PackageTemplate>, TransactionOutputs, CommitmentTxCounterpartyOutputInfo)
where L::Target: Logger {
// Most secp and related errors trying to create keys means we have no hope of constructing
if let Some(per_commitment_data) = per_commitment_option {
fail_unbroadcast_htlcs!(self, "revoked_counterparty", commitment_txid, tx, height,
- per_commitment_data.iter().map(|(htlc, htlc_source)|
+ block_hash, per_commitment_data.iter().map(|(htlc, htlc_source)|
(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
), logger);
} else {
debug_assert!(false, "We should have per-commitment option for any recognized old commitment txn");
fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, tx, height,
- [].iter().map(|reference| *reference), logger);
+ block_hash, [].iter().map(|reference| *reference), logger);
}
}
} else if let Some(per_commitment_data) = per_commitment_option {
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
- fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, tx, height,
+ fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, tx, height, block_hash,
per_commitment_data.iter().map(|(htlc, htlc_source)|
(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
), logger);
(claimable_outpoints, Some((htlc_txid, outputs)))
}
- // Returns (1) `PackageTemplate`s that can be given to the OnChainTxHandler, so that the handler can
+ // Returns (1) `PackageTemplate`s that can be given to the OnchainTxHandler, so that the handler can
// broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable
// script so we can detect whether a holder transaction has been seen on-chain.
fn get_broadcasted_holder_claims(&self, holder_tx: &HolderSignedTx, conf_height: u32) -> (Vec<PackageTemplate>, Option<(Script, PublicKey, PublicKey)>) {
/// revoked using data in holder_claimable_outpoints.
/// Should not be used if check_spend_revoked_transaction succeeds.
/// Returns None unless the transaction is definitely one of our commitment transactions.
- fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> Option<(Vec<PackageTemplate>, TransactionOutputs)> where L::Target: Logger {
+ fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) -> Option<(Vec<PackageTemplate>, TransactionOutputs)> where L::Target: Logger {
let commitment_txid = tx.txid();
let mut claim_requests = Vec::new();
let mut watch_outputs = Vec::new();
let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
append_onchain_update!(res, to_watch);
fail_unbroadcast_htlcs!(self, "latest holder", commitment_txid, tx, height,
- self.current_holder_commitment_tx.htlc_outputs.iter()
+ block_hash, self.current_holder_commitment_tx.htlc_outputs.iter()
.map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())), logger);
} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
if holder_tx.txid == commitment_txid {
let res = self.get_broadcasted_holder_claims(holder_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
append_onchain_update!(res, to_watch);
- fail_unbroadcast_htlcs!(self, "previous holder", commitment_txid, tx, height,
+ fail_unbroadcast_htlcs!(self, "previous holder", commitment_txid, tx, height, block_hash,
holder_tx.htlc_outputs.iter().map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())),
logger);
}
if height > self.best_block.height() {
self.best_block = BestBlock::new(block_hash, height);
- self.block_confirmed(height, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger)
+ self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger)
} else if block_hash != self.best_block.block_hash() {
self.best_block = BestBlock::new(block_hash, height);
self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
let mut watch_outputs = Vec::new();
let mut claimable_outpoints = Vec::new();
- for tx in &txn_matched {
+ 'tx_iter: for tx in &txn_matched {
+ let txid = tx.txid();
+ // If a transaction has already been confirmed, ensure we don't bother processing it duplicatively.
+ if Some(txid) == self.funding_spend_confirmed {
+ log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid);
+ continue 'tx_iter;
+ }
+ for ev in self.onchain_events_awaiting_threshold_conf.iter() {
+ if ev.txid == txid {
+ if let Some(conf_hash) = ev.block_hash {
+ assert_eq!(header.block_hash(), conf_hash,
+ "Transaction {} was already confirmed and is being re-confirmed in a different block.\n\
+ This indicates a severe bug in the transaction connection logic - a reorg should have been processed first!", ev.txid);
+ }
+ log_debug!(logger, "Skipping redundant processing of confirming tx {} as it was previously confirmed", txid);
+ continue 'tx_iter;
+ }
+ }
+ for htlc in self.htlcs_resolved_on_chain.iter() {
+ if Some(txid) == htlc.resolving_txid {
+ log_debug!(logger, "Skipping redundant processing of HTLC resolution tx {} as it was previously confirmed", txid);
+ continue 'tx_iter;
+ }
+ }
+ for spendable_txid in self.spendable_txids_confirmed.iter() {
+ if txid == *spendable_txid {
+ log_debug!(logger, "Skipping redundant processing of spendable tx {} as it was previously confirmed", txid);
+ continue 'tx_iter;
+ }
+ }
+
if tx.input.len() == 1 {
// Assuming our keys were not leaked (in which case we're screwed no matter what),
// commitment transactions and HTLC transactions will all only ever have one input,
if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
let mut balance_spendable_csv = None;
log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
- log_bytes!(self.funding_info.0.to_channel_id()), tx.txid());
+ log_bytes!(self.funding_info.0.to_channel_id()), txid);
self.funding_spend_seen = true;
let mut commitment_tx_to_counterparty_output = None;
if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.0 >> 8*3) as u8 == 0x20 {
let (mut new_outpoints, new_outputs, counterparty_output_idx_sats) =
- self.check_spend_counterparty_transaction(&tx, height, &logger);
+ self.check_spend_counterparty_transaction(&tx, height, &block_hash, &logger);
commitment_tx_to_counterparty_output = counterparty_output_idx_sats;
if !new_outputs.1.is_empty() {
watch_outputs.push(new_outputs);
}
claimable_outpoints.append(&mut new_outpoints);
if new_outpoints.is_empty() {
- if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(&tx, height, &logger) {
+ if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(&tx, height, &block_hash, &logger) {
debug_assert!(commitment_tx_to_counterparty_output.is_none(),
"A commitment transaction matched as both a counterparty and local commitment tx?");
if !new_outputs.1.is_empty() {
}
}
}
- let txid = tx.txid();
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid,
transaction: Some((*tx).clone()),
height,
+ block_hash: Some(block_hash),
event: OnchainEvent::FundingSpendConfirmation {
on_local_output_csv: balance_spendable_csv,
commitment_tx_to_counterparty_output,
// While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
// can also be resolved in a few other ways which can have more than one output. Thus,
// we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
- self.is_resolving_htlc_output(&tx, height, &logger);
+ self.is_resolving_htlc_output(&tx, height, &block_hash, &logger);
- self.is_paying_spendable_output(&tx, height, &logger);
+ self.is_paying_spendable_output(&tx, height, &block_hash, &logger);
}
if height > self.best_block.height() {
self.best_block = BestBlock::new(block_hash, height);
}
- self.block_confirmed(height, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger)
+ self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger)
}
/// Update state for new block(s)/transaction(s) confirmed. Note that the caller must update
/// `self.best_block` before calling if a new best blockchain tip is available. More
/// concretely, `self.best_block` must never be at a lower height than `conf_height`, avoiding
- /// complexity especially in `OnchainTx::update_claims_view`.
+ /// complexity especially in
+ /// `OnchainTx::update_claims_view_from_requests`/`OnchainTx::update_claims_view_from_matched_txn`.
///
/// `conf_height` should be set to the height at which any new transaction(s)/block(s) were
/// confirmed at, even if it is not the current best height.
fn block_confirmed<B: Deref, F: Deref, L: Deref>(
&mut self,
conf_height: u32,
+ conf_hash: BlockHash,
txn_matched: Vec<&Transaction>,
mut watch_outputs: Vec<TransactionOutputs>,
mut claimable_outpoints: Vec<PackageTemplate>,
self.pending_events.push(Event::SpendableOutputs {
outputs: vec![descriptor]
});
+ self.spendable_txids_confirmed.push(entry.txid);
},
OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } => {
self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
}
}
- self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height(), broadcaster, fee_estimator, logger);
// Determine new outputs to watch by comparing against previously known outputs to watch,
// updating the latter in the process.
F::Target: FeeEstimator,
L::Target: Logger,
{
- self.onchain_events_awaiting_threshold_conf.retain(|ref entry| if entry.txid == *txid {
- log_info!(logger, "Removing onchain event with txid {}", txid);
- false
- } else { true });
+ let mut removed_height = None;
+ for entry in self.onchain_events_awaiting_threshold_conf.iter() {
+ if entry.txid == *txid {
+ removed_height = Some(entry.height);
+ break;
+ }
+ }
+
+ if let Some(removed_height) = removed_height {
+ log_info!(logger, "transaction_unconfirmed of txid {} implies height {} was reorg'd out", txid, removed_height);
+ self.onchain_events_awaiting_threshold_conf.retain(|ref entry| if entry.height >= removed_height {
+ log_info!(logger, "Transaction {} reorg'd out", entry.txid);
+ false
+ } else { true });
+ }
+
+ debug_assert!(!self.onchain_events_awaiting_threshold_conf.iter().any(|ref entry| entry.txid == *txid));
+
self.onchain_tx_handler.transaction_unconfirmed(txid, broadcaster, fee_estimator, logger);
}
/// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder
/// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC
- fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
+ fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) where L::Target: Logger {
'outer_loop: for input in &tx.input {
let mut payment_data = None;
let htlc_claim = HTLCClaim::from_witness(&input.witness);
log_claim!($tx_info, $holder_tx, htlc_output, false);
let outbound_htlc = $holder_tx == htlc_output.offered;
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
- txid: tx.txid(), height, transaction: Some(tx.clone()),
+ txid: tx.txid(), height, block_hash: Some(*block_hash), transaction: Some(tx.clone()),
event: OnchainEvent::HTLCSpendConfirmation {
commitment_tx_output_idx: input.previous_output.vout,
preimage: if accepted_preimage_claim || offered_preimage_claim {
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid: tx.txid(),
height,
+ block_hash: Some(*block_hash),
transaction: Some(tx.clone()),
event: OnchainEvent::HTLCSpendConfirmation {
commitment_tx_output_idx: input.previous_output.vout,
txid: tx.txid(),
transaction: Some(tx.clone()),
height,
+ block_hash: Some(*block_hash),
event: OnchainEvent::HTLCSpendConfirmation {
commitment_tx_output_idx: input.previous_output.vout,
preimage: Some(payment_preimage),
txid: tx.txid(),
transaction: Some(tx.clone()),
height,
+ block_hash: Some(*block_hash),
event: OnchainEvent::HTLCUpdate {
source, payment_hash,
htlc_value_satoshis: Some(amount_msat / 1000),
}
/// Check if any transaction broadcasted is paying fund back to some address we can assume to own
- fn is_paying_spendable_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
+ fn is_paying_spendable_output<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) where L::Target: Logger {
let mut spendable_output = None;
for (i, outp) in tx.output.iter().enumerate() { // There is max one spendable output for any channel tx, including ones generated by us
if i > ::core::u16::MAX as usize {
txid: tx.txid(),
transaction: Some(tx.clone()),
height,
+ block_hash: Some(*block_hash),
event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() },
};
log_info!(logger, "Received spendable output {}, spendable at height {}", log_spendable!(spendable_output), entry.confirmation_threshold());
self.0.best_block_updated(header, height, &*self.1, &*self.2, &*self.3);
}
- fn get_relevant_txids(&self) -> Vec<Txid> {
+ fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
self.0.get_relevant_txids()
}
}
const MAX_ALLOC_SIZE: usize = 64*1024;
-impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
- for (BlockHash, ChannelMonitor<Signer>) {
+impl<'a, K: KeysInterface> ReadableArgs<&'a K>
+ for (BlockHash, ChannelMonitor<K::Signer>) {
fn read<R: io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
macro_rules! unwrap_obj {
($key: expr) => {
return Err(DecodeError::InvalidValue);
}
}
- let onchain_tx_handler: OnchainTxHandler<Signer> = ReadableArgs::read(reader, keys_manager)?;
+ let onchain_tx_handler: OnchainTxHandler<K::Signer> = ReadableArgs::read(reader, keys_manager)?;
let lockdown_from_offchain = Readable::read(reader)?;
let holder_tx_signed = Readable::read(reader)?;
let mut funding_spend_seen = Some(false);
let mut counterparty_node_id = None;
let mut confirmed_commitment_tx_counterparty_output = None;
+ let mut spendable_txids_confirmed = Some(Vec::new());
read_tlv_fields!(reader, {
(1, funding_spend_confirmed, option),
(3, htlcs_resolved_on_chain, vec_type),
(7, funding_spend_seen, option),
(9, counterparty_node_id, option),
(11, confirmed_commitment_tx_counterparty_output, option),
+ (13, spendable_txids_confirmed, vec_type),
});
let mut secp_ctx = Secp256k1::new();
funding_spend_confirmed,
confirmed_commitment_tx_counterparty_output,
htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
+ spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),
best_block,
counterparty_node_id,
SecretKey::from_slice(&[41; 32]).unwrap(),
[41; 32],
0,
- [0; 32]
+ [0; 32],
);
let counterparty_pubkeys = ChannelPublicKeys {
}),
funding_outpoint: Some(funding_outpoint),
opt_anchors: None,
+ opt_non_zero_fee_anchors: None,
};
// Prune with one old state and a holder commitment tx holding a few overlaps with the
// old state.
htlc_base_key: SecretKey,
commitment_seed: [u8; 32],
channel_value_satoshis: u64,
- channel_keys_id: [u8; 32]) -> InMemorySigner {
+ channel_keys_id: [u8; 32],
+ ) -> InMemorySigner {
let holder_channel_pubkeys =
InMemorySigner::make_holder_keys(secp_ctx, &funding_key, &revocation_base_key,
&payment_key, &delayed_payment_base_key,
let mut htlc_sigs = Vec::with_capacity(commitment_tx.htlcs().len());
for htlc in commitment_tx.htlcs() {
- let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_tx.feerate_per_kw(), self.holder_selected_contest_delay(), htlc, self.opt_anchors(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ let channel_parameters = self.get_channel_parameters();
+ let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_tx.feerate_per_kw(), self.holder_selected_contest_delay(), htlc, self.opt_anchors(), channel_parameters.opt_non_zero_fee_anchors.is_some(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys);
let htlc_sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
htlc_base_key,
commitment_seed,
channel_value_satoshis,
- params.clone()
+ params.clone(),
)
}
/// if they become available at the same time.
fn best_block_updated(&self, header: &BlockHeader, height: u32);
- /// Returns transactions that should be monitored for reorganization out of the chain.
+ /// Returns transactions that should be monitored for reorganization out of the chain along
+ /// with the hash of the block as part of which they had been previously confirmed.
///
/// Will include any transactions passed to [`transactions_confirmed`] that have insufficient
/// confirmations to be safe from a chain reorganization. Will not include any transactions
/// May be called to determine the subset of transactions that must still be monitored for
/// reorganization. Will be idempotent between calls but may change as a result of calls to the
/// other interface methods. Thus, this is useful to determine which transactions may need to be
- /// given to [`transaction_unconfirmed`].
+ /// given to [`transaction_unconfirmed`]. If any of the returned transactions are confirmed in
+ /// a block other than the one with the given hash, they need to be unconfirmed and reconfirmed
+ /// via [`transaction_unconfirmed`] and [`transactions_confirmed`], respectively.
///
/// [`transactions_confirmed`]: Self::transactions_confirmed
/// [`transaction_unconfirmed`]: Self::transaction_unconfirmed
- fn get_relevant_txids(&self) -> Vec<Txid>;
+ fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)>;
}
/// An enum representing the status of a channel monitor update persistence.
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::Script;
-use bitcoin::hash_types::Txid;
+use bitcoin::hash_types::{Txid, BlockHash};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
struct OnchainEventEntry {
txid: Txid,
height: u32,
+ block_hash: Option<BlockHash>, // Added as optional, will be filled in for any entry generated on 0.0.113 or after
event: OnchainEvent,
}
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
write_tlv_fields!(writer, {
(0, self.txid, required),
+ (1, self.block_hash, option),
(2, self.height, required),
(4, self.event, required),
});
fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
let mut txid = Txid::all_zeros();
let mut height = 0;
+ let mut block_hash = None;
let mut event = None;
read_tlv_fields!(reader, {
(0, txid, required),
+ (1, block_hash, option),
(2, height, required),
(4, event, ignorable),
});
if let Some(ev) = event {
- Ok(Some(Self { txid, height, event: ev }))
+ Ok(Some(Self { txid, height, block_hash, event: ev }))
} else {
Ok(None)
}
/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
- /// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output
- /// if we receive a preimage after force-close.
- /// `conf_height` represents the height at which the transactions in `txn_matched` were
- /// confirmed. This does not need to equal the current blockchain tip height, which should be
- /// provided via `cur_height`, however it must never be higher than `cur_height`.
- pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
- where B::Target: BroadcasterInterface,
- F::Target: FeeEstimator,
- L::Target: Logger,
+ /// Together with `update_claims_view_from_matched_txn` this used to be named
+ /// `block_connected`, but it is now also used for claiming an HTLC output if we receive a
+ /// preimage after force-close.
+ ///
+ /// `conf_height` represents the height at which the requests were generated. This
+ /// does not need to equal the current blockchain tip height, which should be provided via
+ /// `cur_height`, however it must never be higher than `cur_height`.
+ pub(crate) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Deref>(
+ &mut self, requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
+ broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) where
+ B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
- log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {} and {} claim requests", cur_height, txn_matched.len(), conf_height, requests.len());
+ log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
let mut preprocessed_requests = Vec::with_capacity(requests.len());
let mut aggregated_request = None;
self.pending_claim_requests.insert(txid, req);
}
}
+ }
+ /// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
+ /// for this channel, provide new relevant on-chain transactions and/or new claim requests.
+ /// Together with `update_claims_view_from_requests` this used to be named `block_connected`,
+ /// but it is now also used for claiming an HTLC output if we receive a preimage after force-close.
+ ///
+ /// `conf_height` represents the height at which the transactions in `txn_matched` were
+ /// confirmed. This does not need to equal the current blockchain tip height, which should be
+ /// provided via `cur_height`, however it must never be higher than `cur_height`.
+ pub(crate) fn update_claims_view_from_matched_txn<B: Deref, F: Deref, L: Deref>(
+ &mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash,
+ cur_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) where
+ B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height);
let mut bump_candidates = HashMap::new();
for tx in txn_matched {
// Scan all input to verify is one of the outpoint spent is of interest for us
let entry = OnchainEventEntry {
txid: tx.txid(),
height: conf_height,
+ block_hash: Some(conf_hash),
event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
};
if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
let entry = OnchainEventEntry {
txid: tx.txid(),
height: conf_height,
+ block_hash: Some(conf_hash),
event: OnchainEvent::ContentiousOutpoint { package },
};
if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
self.claimable_outpoints.get(outpoint).is_some()
}
- pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
- let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
+ pub(crate) fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
+ let mut txids: Vec<(Txid, Option<BlockHash>)> = self.onchain_events_awaiting_threshold_conf
.iter()
- .map(|entry| entry.txid)
+ .map(|entry| (entry.txid, entry.block_hash))
.collect();
- txids.sort_unstable();
+ txids.sort_unstable_by_key(|(txid, _)| *txid);
txids.dedup();
txids
}
pub mod util;
pub mod chain;
pub mod ln;
+#[allow(unused)]
+mod offers;
pub mod routing;
pub mod onion_message;
///
/// Panics if htlc.transaction_output_index.is_none() (as such HTLCs do not appear in the
/// commitment transaction).
-pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, contest_delay: u16, htlc: &HTLCOutputInCommitment, opt_anchors: bool, broadcaster_delayed_payment_key: &PublicKey, revocation_key: &PublicKey) -> Transaction {
+pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, contest_delay: u16, htlc: &HTLCOutputInCommitment, opt_anchors: bool, use_non_zero_fee_anchors: bool, broadcaster_delayed_payment_key: &PublicKey, revocation_key: &PublicKey) -> Transaction {
let mut txins: Vec<TxIn> = Vec::new();
txins.push(TxIn {
previous_output: OutPoint {
} else {
htlc_success_tx_weight(opt_anchors)
};
- let output_value = if opt_anchors {
+ let output_value = if opt_anchors && !use_non_zero_fee_anchors {
htlc.amount_msat / 1000
} else {
let total_fee = feerate_per_kw as u64 * weight / 1000;
pub funding_outpoint: Option<chain::transaction::OutPoint>,
/// Are anchors (zero fee HTLC transaction variant) used for this channel. Boolean is
/// serialization backwards-compatible.
- pub opt_anchors: Option<()>
+ pub opt_anchors: Option<()>,
+ /// Whether non-zero-fee anchors are enabled (used in conjunction with opt_anchors)
+ /// It is intended merely for backwards compatibility with signers that need it.
+ /// There is no support for this feature in LDK channel negotiation.
+ pub opt_non_zero_fee_anchors: Option<()>,
}
/// Late-bound per-channel counterparty data used to build transactions.
(6, counterparty_parameters, option),
(8, funding_outpoint, option),
(10, opt_anchors, option),
+ (12, opt_non_zero_fee_anchors, option),
});
/// Static channel fields used to build transactions given per-commitment fields, organized by
is_outbound_from_holder: false,
counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: channel_pubkeys.clone(), selected_contest_delay: 0 }),
funding_outpoint: Some(chain::transaction::OutPoint { txid: Txid::all_zeros(), index: 0 }),
- opt_anchors: None
+ opt_anchors: None,
+ opt_non_zero_fee_anchors: None,
};
let mut htlcs_with_aux: Vec<(_, ())> = Vec::new();
let inner = CommitmentTransaction::new_with_auxiliary_htlc_data(0, 0, 0, false, dummy_key.clone(), dummy_key.clone(), keys, 0, &mut htlcs_with_aux, &channel_parameters.as_counterparty_broadcastable());
htlcs: Vec<HTLCOutputInCommitment>,
// A boolean that is serialization backwards-compatible
opt_anchors: Option<()>,
+ // Whether non-zero-fee anchors should be used
+ opt_non_zero_fee_anchors: Option<()>,
// A cache of the parties' pubkeys required to construct the transaction, see doc for trust()
keys: TxCreationKeys,
// For access to the pre-built transaction, see doc for trust()
(10, built, required),
(12, htlcs, vec_type),
(14, opt_anchors, option),
+ (16, opt_non_zero_fee_anchors, option),
});
impl CommitmentTransaction {
transaction,
txid
},
+ opt_non_zero_fee_anchors: None,
}
}
+ /// Use non-zero fee anchors
+ ///
+ /// (C-not exported) due to move, and also not likely to be useful for binding users
+ pub fn with_non_zero_fee_anchors(mut self) -> Self {
+ self.opt_non_zero_fee_anchors = Some(());
+ self
+ }
+
fn internal_rebuild_transaction(&self, keys: &TxCreationKeys, channel_parameters: &DirectedChannelTransactionParameters, broadcaster_funding_key: &PublicKey, countersignatory_funding_key: &PublicKey) -> Result<BuiltCommitmentTransaction, ()> {
let (obscured_commitment_transaction_number, txins) = Self::internal_build_inputs(self.commitment_number, channel_parameters);
for this_htlc in inner.htlcs.iter() {
assert!(this_htlc.transaction_output_index.is_some());
- let htlc_tx = build_htlc_transaction(&txid, inner.feerate_per_kw, channel_parameters.contest_delay(), &this_htlc, self.opt_anchors(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ let htlc_tx = build_htlc_transaction(&txid, inner.feerate_per_kw, channel_parameters.contest_delay(), &this_htlc, self.opt_anchors(), self.opt_non_zero_fee_anchors.is_some(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
let htlc_redeemscript = get_htlc_redeemscript_with_explicit_keys(&this_htlc, self.opt_anchors(), &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key);
// Further, we should never be provided the preimage for an HTLC-Timeout transaction.
if this_htlc.offered && preimage.is_some() { unreachable!(); }
- let mut htlc_tx = build_htlc_transaction(&txid, inner.feerate_per_kw, channel_parameters.contest_delay(), &this_htlc, self.opt_anchors(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ let mut htlc_tx = build_htlc_transaction(&txid, inner.feerate_per_kw, channel_parameters.contest_delay(), &this_htlc, self.opt_anchors(), self.opt_non_zero_fee_anchors.is_some(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
let htlc_redeemscript = get_htlc_redeemscript_with_explicit_keys(&this_htlc, self.opt_anchors(), &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key);
is_outbound_from_holder: false,
counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: counterparty_pubkeys.clone(), selected_contest_delay: 0 }),
funding_outpoint: Some(chain::transaction::OutPoint { txid: Txid::all_zeros(), index: 0 }),
- opt_anchors: None
+ opt_anchors: None,
+ opt_non_zero_fee_anchors: None,
};
let mut htlcs_with_aux: Vec<(_, ())> = Vec::new();
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
-use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, PaymentId};
+use crate::ln::channelmanager::{self, ChannelManager, RAACommitmentOrder, PaymentSendFailure, PaymentId};
use crate::ln::channel::AnnouncementSigsState;
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
-use crate::util::config::UserConfig;
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::util::errors::APIError;
let events_3 = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events_3.len(), 1);
match events_3[0] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
assert_eq!(payment_hash_1, *payment_hash);
assert_eq!(amount_msat, 1_000_000);
+ assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
check_added_monitors!(nodes[0], 1);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
}
(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
let events_5 = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events_5.len(), 1);
match events_5[0] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
assert_eq!(payment_hash_2, *payment_hash);
assert_eq!(amount_msat, 1_000_000);
+ assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
},
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::PaymentReceived { payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentReceived { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
assert_eq!(payment_hash, our_payment_hash);
assert_eq!(amount_msat, 1_000_000);
+ assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
check_added_monitors!(nodes[1], 1);
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
check_added_monitors!(nodes[0], 1);
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
(Some(payment_preimage_4), Some(payment_hash_4))
} else { (None, None) };
get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
+ nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
check_added_monitors!(nodes[1], 1);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
check_added_monitors!(nodes[1], 1);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
// Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
// until we've channel_monitor_update'd and updated for the new commitment transaction.
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
// to the next message also tests resetting the delivery order.
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
// Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
// Note that at this point there is a pending commitment transaction update for A being held by
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
- let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+ let channel_id = chan_1.2;
+ let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id } => {
assert_eq!(payment_hash_2, *payment_hash);
assert_eq!(1_000_000, amount_msat);
+ assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
+ assert_eq!(via_user_channel_id, Some(42));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
_ => panic!("Unexpected event"),
}
match events[1] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
assert_eq!(payment_hash_3, *payment_hash);
assert_eq!(1_000_000, amount_msat);
+ assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 0);
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
// disconnect the peers. Note that the fuzzer originally found this issue because
// deserializing a ChannelManager in this state causes an assertion failure.
if reload_a {
- let nodes_0_serialized = nodes[0].node.encode();
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let config = UserConfig::default();
- nodes_0_deserialized = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: config,
- keys_manager,
- fee_estimator: node_cfgs[0].fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: nodes[0].logger,
- channel_monitors,
- }).unwrap().1
- };
- nodes[0].node = &nodes_0_deserialized;
- assert!(nodes_0_read.is_empty());
-
- assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- check_added_monitors!(nodes[0], 1);
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
} else {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
}
nodes[0].chain_source.watched_txn.lock().unwrap().clear();
nodes[0].chain_source.watched_outputs.lock().unwrap().clear();
- let nodes_0_serialized = nodes[0].node.encode();
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let config = UserConfig::default();
- nodes_0_deserialized = {
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: config,
- keys_manager,
- fee_estimator: node_cfgs[0].fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: nodes[0].logger,
- channel_monitors: HashMap::new(),
- }).unwrap().1
- };
- nodes[0].node = &nodes_0_deserialized;
- assert!(nodes_0_read.is_empty());
-
+ reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
assert!(nodes[0].node.list_channels().is_empty());
}
nodes[1].chain_source.watched_txn.lock().unwrap().clear();
nodes[1].chain_source.watched_outputs.lock().unwrap().clear();
- let nodes_1_serialized = nodes[1].node.encode();
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[1].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
- nodes[1].chain_monitor = &new_chain_monitor;
-
- let mut nodes_1_read = &nodes_1_serialized[..];
- let config = UserConfig::default();
- nodes_1_deserialized = {
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_1_read, ChannelManagerReadArgs {
- default_config: config,
- keys_manager,
- fee_estimator: node_cfgs[1].fee_estimator,
- chain_monitor: nodes[1].chain_monitor,
- tx_broadcaster: nodes[1].tx_broadcaster.clone(),
- logger: nodes[1].logger,
- channel_monitors: HashMap::new(),
- }).unwrap().1
- };
- nodes[1].node = &nodes_1_deserialized;
- assert!(nodes_1_read.is_empty());
+ reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);
check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
assert!(nodes[1].node.list_channels().is_empty());
pub raa: Option<msgs::RevokeAndACK>,
pub commitment_update: Option<msgs::CommitmentUpdate>,
pub order: RAACommitmentOrder,
- pub mon_update: Option<ChannelMonitorUpdate>,
- pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
pub shutdown_msg: Option<msgs::Shutdown>,
}
inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
- user_id: u64,
+ user_id: u128,
channel_id: [u8; 32],
channel_state: u32,
// Constructors:
pub fn new_outbound<K: Deref, F: Deref>(
fee_estimator: &LowerBoundedFeeEstimator<F>, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures,
- channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig, current_chain_height: u32,
+ channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
outbound_scid_alias: u64
) -> Result<Channel<Signer>, APIError>
where K::Target: KeysInterface<Signer = Signer>,
counterparty_parameters: None,
funding_outpoint: None,
opt_anchors: if opt_anchors { Some(()) } else { None },
+ opt_non_zero_fee_anchors: None
},
funding_transaction: None,
})
}
- fn check_remote_fee<F: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>, feerate_per_kw: u32) -> Result<(), ChannelError>
- where F::Target: FeeEstimator
+ fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
+ feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
+ -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
{
// We only bound the fee updates on the upper side to prevent completely absurd feerates,
// always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
// of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
// sat/kw before the comparison here.
if feerate_per_kw + 250 < lower_limit {
+ if let Some(cur_feerate) = cur_feerate_per_kw {
+ if feerate_per_kw > cur_feerate {
+ log_warn!(logger,
+ "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
+ cur_feerate, feerate_per_kw);
+ return Ok(());
+ }
+ }
return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
}
Ok(())
/// Assumes chain_hash has already been checked and corresponds with what we expect!
pub fn new_from_req<K: Deref, F: Deref, L: Deref>(
fee_estimator: &LowerBoundedFeeEstimator<F>, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures,
- msg: &msgs::OpenChannel, user_id: u64, config: &UserConfig, current_chain_height: u32, logger: &L,
+ msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L,
outbound_scid_alias: u64
) -> Result<Channel<Signer>, ChannelError>
where K::Target: KeysInterface<Signer = Signer>,
if msg.htlc_minimum_msat >= full_channel_value_msat {
return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
}
- Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
+ Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
if msg.to_self_delay > max_counterparty_selected_contest_delay {
}),
funding_outpoint: None,
opt_anchors: if opt_anchors { Some(()) } else { None },
+ opt_non_zero_fee_anchors: None
},
funding_transaction: None,
if let Some(_) = htlc.transaction_output_index {
let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.opt_anchors(),
- &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys);
let htlc_sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
}
}
- pub fn update_fee<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee) -> Result<(), ChannelError>
- where F::Target: FeeEstimator
+ pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
{
if self.is_outbound() {
return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
}
- Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
+ Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.feerate_per_kw), logger)?;
let feerate_over_dust_buffer = msg.feerate_per_kw > self.get_dust_buffer_feerate(None);
self.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
// Short circuit the whole handler as there is nothing we can resend them
return Ok(ReestablishResponses {
channel_ready: None,
- raa: None, commitment_update: None, mon_update: None,
+ raa: None, commitment_update: None,
order: RAACommitmentOrder::CommitmentFirst,
- holding_cell_failed_htlcs: Vec::new(),
shutdown_msg, announcement_sigs,
});
}
next_per_commitment_point,
short_channel_id_alias: Some(self.outbound_scid_alias),
}),
- raa: None, commitment_update: None, mon_update: None,
+ raa: None, commitment_update: None,
order: RAACommitmentOrder::CommitmentFirst,
- holding_cell_failed_htlcs: Vec::new(),
shutdown_msg, announcement_sigs,
});
}
log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
}
- if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
- // We're up-to-date and not waiting on a remote revoke (if we are our
- // channel_reestablish should result in them sending a revoke_and_ack), but we may
- // have received some updates while we were disconnected. Free the holding cell
- // now!
- match self.free_holding_cell_htlcs(logger) {
- Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
- Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
- panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
- Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: Some(commitment_update),
- order: self.resend_order.clone(),
- mon_update: Some(monitor_update),
- holding_cell_failed_htlcs,
- })
- },
- Ok((None, holding_cell_failed_htlcs)) => {
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: None,
- order: self.resend_order.clone(),
- mon_update: None,
- holding_cell_failed_htlcs,
- })
- },
- }
- } else {
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: None,
- order: self.resend_order.clone(),
- mon_update: None,
- holding_cell_failed_htlcs: Vec::new(),
- })
- }
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ raa: required_revoke,
+ commitment_update: None,
+ order: self.resend_order.clone(),
+ })
} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
if required_revoke.is_some() {
log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
self.monitor_pending_commitment_signed = true;
Ok(ReestablishResponses {
channel_ready, shutdown_msg, announcement_sigs,
- commitment_update: None, raa: None, mon_update: None,
+ commitment_update: None, raa: None,
order: self.resend_order.clone(),
- holding_cell_failed_htlcs: Vec::new(),
})
} else {
Ok(ReestablishResponses {
raa: required_revoke,
commitment_update: Some(self.get_last_commitment_update(logger)),
order: self.resend_order.clone(),
- mon_update: None,
- holding_cell_failed_htlcs: Vec::new(),
})
}
} else {
pub fn shutdown<K: Deref>(
&mut self, keys_provider: &K, their_features: &InitFeatures, msg: &msgs::Shutdown
) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
- where K::Target: KeysInterface<Signer = Signer>
+ where K::Target: KeysInterface
{
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
/// Gets the "user_id" value passed into the construction of this channel. It has no special
/// meaning and exists only to allow users to have a persistent identifier of a channel.
- pub fn get_user_id(&self) -> u64 {
+ pub fn get_user_id(&self) -> u128 {
self.user_id
}
self.channel_transaction_parameters.funding_outpoint
}
+ /// Returns the block hash in which our funding transaction was confirmed.
+ pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
+ self.funding_tx_confirmed_in
+ }
+
+ /// Returns the current number of confirmations on the funding transaction.
+ pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
+ if self.funding_tx_confirmation_height == 0 {
+ // We either haven't seen any confirmation yet, or observed a reorg.
+ return 0;
+ }
+
+ height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
+ }
+
fn get_holder_selected_contest_delay(&self) -> u16 {
self.channel_transaction_parameters.holder_selected_contest_delay
}
/// should be sent back to the counterparty node.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- pub fn accept_inbound_channel(&mut self, user_id: u64) -> msgs::AcceptChannel {
+ pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel {
if self.is_outbound() {
panic!("Tried to send accept_channel for an outbound channel?");
}
for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
- encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.opt_anchors(), &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
+ encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &counterparty_keys)),
log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id()));
/// holding cell HTLCs for payment failure.
pub fn get_shutdown<K: Deref>(&mut self, keys_provider: &K, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>)
-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
- where K::Target: KeysInterface<Signer = Signer> {
+ where K::Target: KeysInterface {
for htlc in self.pending_outbound_htlcs.iter() {
if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
self.update_time_counter += 1;
(monitor_update, dropped_outbound_htlcs)
}
+
+ pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=&HTLCSource> {
+ self.holding_cell_htlc_updates.iter()
+ .flat_map(|htlc_update| {
+ match htlc_update {
+ HTLCUpdateAwaitingACK::AddHTLC { source, .. } => { Some(source) }
+ _ => None
+ }
+ })
+ .chain(self.pending_outbound_htlcs.iter().map(|htlc| &htlc.source))
+ }
}
const SERIALIZATION_VERSION: u8 = 2;
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
- self.user_id.write(writer)?;
+ // `user_id` used to be a single u64 value. In order to remain backwards compatible with
+ // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
+ // the low bytes now and the optional high bytes later.
+ let user_id_low = self.user_id as u64;
+ user_id_low.write(writer)?;
// Version 1 deserializers expected to read parts of the config object here. Version 2
// deserializers (0.0.99) now read config through TLVs, and as we now require them for
let channel_ready_event_emitted = Some(self.channel_ready_event_emitted);
+ // `user_id` used to be a single u64 value. In order to remain backwards compatible with
+ // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
+ // we write the high bytes as an option here.
+ let user_id_high_opt = Some((self.user_id >> 64) as u64);
+
write_tlv_fields!(writer, {
(0, self.announcement_sigs, option),
// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
(19, self.latest_inbound_scid_alias, option),
(21, self.outbound_scid_alias, required),
(23, channel_ready_event_emitted, option),
+ (25, user_id_high_opt, option),
});
Ok(())
}
const MAX_ALLOC_SIZE: usize = 64*1024;
-impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<Signer>
- where K::Target: KeysInterface<Signer = Signer> {
+impl<'a, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<<K::Target as KeysInterface>::Signer>
+ where K::Target: KeysInterface {
fn read<R : io::Read>(reader: &mut R, args: (&'a K, u32)) -> Result<Self, DecodeError> {
let (keys_source, serialized_height) = args;
let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
- let user_id = Readable::read(reader)?;
+ // `user_id` used to be a single u64 value. In order to remain backwards compatible with
+ // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
+ // the low bytes now and the high bytes later.
+ let user_id_low: u64 = Readable::read(reader)?;
let mut config = Some(LegacyChannelConfig::default());
if ver == 1 {
let mut outbound_scid_alias = None;
let mut channel_ready_event_emitted = None;
+ let mut user_id_high_opt: Option<u64> = None;
+
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(19, latest_inbound_scid_alias, option),
(21, outbound_scid_alias, option),
(23, channel_ready_event_emitted, option),
+ (25, user_id_high_opt, option),
});
if let Some(preimages) = preimages_opt {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_source.get_secure_random_bytes());
+ // `user_id` used to be a single u64 value. In order to remain backwards
+ // compatible with versions prior to 0.0.113, the u128 is serialized as two
+ // separate u64 values.
+ let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
+
Ok(Channel {
user_id,
// arithmetic, causing a panic with debug assertions enabled.
let fee_est = TestFeeEstimator { fee_est: 42 };
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
- assert!(Channel::<InMemorySigner>::check_remote_fee(&bounded_fee_estimator, u32::max_value()).is_err());
+ assert!(Channel::<InMemorySigner>::check_remote_fee(&bounded_fee_estimator,
+ u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
}
struct Keys {
// These aren't set in the test vectors:
[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
10_000_000,
- [0; 32]
+ [0; 32],
);
assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
let ref htlc = htlcs[$htlc_idx];
let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.feerate_per_kw,
chan.get_counterparty_selected_contest_delay().unwrap(),
- &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::InvoiceFeatures;
-use crate::routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
+use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
use crate::ln::wire::Encode;
use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, Recipient};
use crate::util::config::{UserConfig, ChannelConfig};
-use crate::util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
+use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::util::{byte_utils, events};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
pub(super) routing: PendingHTLCRouting,
pub(super) incoming_shared_secret: [u8; 32],
payment_hash: PaymentHash,
- pub(super) amt_to_forward: u64,
+ pub(super) incoming_amt_msat: Option<u64>, // Added in 0.0.113
+ pub(super) outgoing_amt_msat: u64,
pub(super) outgoing_cltv_value: u32,
}
Fail(HTLCFailureMsg),
}
-pub(super) enum HTLCForwardInfo {
- AddHTLC {
- forward_info: PendingHTLCInfo,
+pub(super) struct PendingAddHTLCInfo {
+ pub(super) forward_info: PendingHTLCInfo,
- // These fields are produced in `forward_htlcs()` and consumed in
- // `process_pending_htlc_forwards()` for constructing the
- // `HTLCSource::PreviousHopData` for failed and forwarded
- // HTLCs.
- //
- // Note that this may be an outbound SCID alias for the associated channel.
- prev_short_channel_id: u64,
- prev_htlc_id: u64,
- prev_funding_outpoint: OutPoint,
- },
+ // These fields are produced in `forward_htlcs()` and consumed in
+ // `process_pending_htlc_forwards()` for constructing the
+ // `HTLCSource::PreviousHopData` for failed and forwarded
+ // HTLCs.
+ //
+ // Note that this may be an outbound SCID alias for the associated channel.
+ prev_short_channel_id: u64,
+ prev_htlc_id: u64,
+ prev_funding_outpoint: OutPoint,
+ prev_user_channel_id: u128,
+}
+
+pub(super) enum HTLCForwardInfo {
+ AddHTLC(PendingAddHTLCInfo),
FailHTLC {
htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
struct MsgHandleErrInternal {
err: msgs::LightningError,
- chan_id: Option<([u8; 32], u64)>, // If Some a channel of ours has been closed
+ chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
Self { err, chan_id: None, shutdown_finish: None }
}
#[inline]
- fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u64, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
+ fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
Self {
err: LightningError {
err: err.clone(),
// Note this is only exposed in cfg(test):
pub(super) struct ChannelHolder<Signer: Sign> {
pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
- /// Map from payment hash to the payment data and any HTLCs which are to us and can be
- /// failed/claimed by the user.
- ///
- /// Note that while this is held in the same mutex as the channels themselves, no consistency
- /// guarantees are made about the channels given here actually existing anymore by the time you
- /// go to read them!
- claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<MessageSendEvent>,
// |
// |__`forward_htlcs`
// |
-// |__`channel_state`
-// | |
-// | |__`id_to_peer`
+// |__`pending_inbound_payments`
// | |
-// | |__`short_to_chan_info`
+// | |__`claimable_htlcs`
// | |
-// | |__`per_peer_state`
+// | |__`pending_outbound_payments`
// | |
-// | |__`outbound_scid_aliases`
-// | |
-// | |__`pending_inbound_payments`
+// | |__`channel_state`
+// | |
+// | |__`id_to_peer`
+// | |
+// | |__`short_to_chan_info`
// | |
-// | |__`pending_outbound_payments`
+// | |__`per_peer_state`
+// | |
+// | |__`outbound_scid_aliases`
// | |
// | |__`best_block`
// | |
#[cfg(not(test))]
forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+ /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+ /// failed/claimed by the user.
+ ///
+	/// Note that no consistency guarantees are made about the channels given here actually
+ /// existing anymore by the time you go to read them!
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
+
/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
/// and some closed channels which reached a usable state prior to being closed. This is used
/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
///
/// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
pub unspendable_punishment_reserve: Option<u64>,
- /// The `user_channel_id` passed in to create_channel, or 0 if the channel was inbound.
- pub user_channel_id: u64,
+ /// The `user_channel_id` passed in to create_channel, or a random value if the channel was
+ /// inbound. This may be zero for inbound channels serialized with LDK versions prior to
+ /// 0.0.113.
+ pub user_channel_id: u128,
/// Our total balance. This is the amount we would get if we close the channel.
/// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
/// amount is not likely to be recoverable on close.
/// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
/// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
pub confirmations_required: Option<u32>,
+ /// The current number of confirmations on the funding transaction.
+ ///
+ /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113.
+ pub confirmations: Option<u32>,
/// The number of blocks (after our commitment transaction confirms) that we will need to wait
/// until we can claim our funds after we force-close the channel. During this time our
/// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
#[derive(Clone, Debug)]
pub enum PaymentSendFailure {
/// A parameter which was passed to send_payment was invalid, preventing us from attempting to
- /// send the payment at all. No channel state has been changed or messages sent to peers, and
- /// once you've changed the parameter at error, you can freely retry the payment in full.
+ /// send the payment at all.
+ ///
+ /// You can freely resend the payment in full (with the parameter error fixed).
+ ///
+	/// Because the payment failed outright, no payment tracking is done; you do not need to call
+ /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work
+ /// for this payment.
ParameterError(APIError),
/// A parameter in a single path which was passed to send_payment was invalid, preventing us
- /// from attempting to send the payment at all. No channel state has been changed or messages
- /// sent to peers, and once you've changed the parameter at error, you can freely retry the
- /// payment in full.
+ /// from attempting to send the payment at all.
+ ///
+ /// You can freely resend the payment in full (with the parameter error fixed).
///
/// The results here are ordered the same as the paths in the route object which was passed to
/// send_payment.
+ ///
+	/// Because the payment failed outright, no payment tracking is done; you do not need to call
+ /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work
+ /// for this payment.
PathParameterError(Vec<Result<(), APIError>>),
/// All paths which were attempted failed to send, with no channel state change taking place.
- /// You can freely retry the payment in full (though you probably want to do so over different
+ /// You can freely resend the payment in full (though you probably want to do so over different
/// paths than the ones selected).
///
- /// [`ChannelManager::abandon_payment`] does *not* need to be called for this payment and
- /// [`ChannelManager::retry_payment`] will *not* work for this payment.
- AllFailedRetrySafe(Vec<APIError>),
+	/// Because the payment failed outright, no payment tracking is done; you do not need to call
+ /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work
+ /// for this payment.
+ AllFailedResendSafe(Vec<APIError>),
+ /// Indicates that a payment for the provided [`PaymentId`] is already in-flight and has not
+ /// yet completed (i.e. generated an [`Event::PaymentSent`]) or been abandoned (via
+ /// [`ChannelManager::abandon_payment`]).
+ ///
+ /// [`Event::PaymentSent`]: events::Event::PaymentSent
+ DuplicatePayment,
/// Some paths which were attempted failed to send, though possibly not all. At least some
/// paths have irrevocably committed to the HTLC and retrying the payment in full would result
/// in over-/re-payment.
}
}
-macro_rules! handle_chan_restoration_locked {
- ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
- $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
- $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
- let mut htlc_forwards = None;
-
- let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
- let chanmon_update_is_none = chanmon_update.is_none();
- let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
- let res = loop {
- let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
- if !forwards.is_empty() {
- htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()),
- $channel_entry.get().get_funding_txo().unwrap(), forwards));
- }
-
- if chanmon_update.is_some() {
- // On reconnect, we, by definition, only resend a channel_ready if there have been
- // no commitment updates, so the only channel monitor update which could also be
- // associated with a channel_ready would be the funding_created/funding_signed
- // monitor update. That monitor update failing implies that we won't send
- // channel_ready until it's been updated, so we can't have a channel_ready and a
- // monitor update here (so we don't bother to handle it correctly below).
- assert!($channel_ready.is_none());
- // A channel monitor update makes no sense without either a channel_ready or a
- // commitment update to process after it. Since we can't have a channel_ready, we
- // only bother to handle the monitor-update + commitment_update case below.
- assert!($commitment_update.is_some());
- }
-
- if let Some(msg) = $channel_ready {
- // Similar to the above, this implies that we're letting the channel_ready fly
- // before it should be allowed to.
- assert!(chanmon_update.is_none());
- send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
- }
- if let Some(msg) = $announcement_sigs {
- $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: counterparty_node_id,
- msg,
- });
- }
-
- emit_channel_ready_event!($self, $channel_entry.get_mut());
-
- let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
- if let Some(monitor_update) = chanmon_update {
- // We only ever broadcast a funding transaction in response to a funding_signed
- // message and the resulting monitor update. Thus, on channel_reestablish
- // message handling we can't have a funding transaction to broadcast. When
- // processing a monitor update finishing resulting in a funding broadcast, we
- // cannot have a second monitor update, thus this case would indicate a bug.
- assert!(funding_broadcastable.is_none());
- // Given we were just reconnected or finished updating a channel monitor, the
- // only case where we can get a new ChannelMonitorUpdate would be if we also
- // have some commitment updates to send as well.
- assert!($commitment_update.is_some());
- match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- // channel_reestablish doesn't guarantee the order it returns is sensical
- // for the messages it returns, but if we're setting what messages to
- // re-transmit on monitor update success, we need to make sure it is sane.
- let mut order = $order;
- if $raa.is_none() {
- order = RAACommitmentOrder::CommitmentFirst;
- }
- break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
- }
- }
- }
-
- macro_rules! handle_cs { () => {
- if let Some(update) = $commitment_update {
- $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id,
- updates: update,
- });
- }
- } }
- macro_rules! handle_raa { () => {
- if let Some(revoke_and_ack) = $raa {
- $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: counterparty_node_id,
- msg: revoke_and_ack,
- });
- }
- } }
- match $order {
- RAACommitmentOrder::CommitmentFirst => {
- handle_cs!();
- handle_raa!();
- },
- RAACommitmentOrder::RevokeAndACKFirst => {
- handle_raa!();
- handle_cs!();
- },
- }
- if let Some(tx) = funding_broadcastable {
- log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
- $self.tx_broadcaster.broadcast_transaction(&tx);
- }
- break Ok(());
- };
-
- if chanmon_update_is_none {
- // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
- // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
- // should *never* end up calling back to `chain_monitor.update_channel()`.
- assert!(res.is_ok());
- }
-
- (htlc_forwards, res, counterparty_node_id)
- } }
-}
-
-macro_rules! post_handle_chan_restoration {
- ($self: ident, $locked_res: expr) => { {
- let (htlc_forwards, res, counterparty_node_id) = $locked_res;
-
- let _ = handle_error!($self, res, counterparty_node_id);
-
- if let Some(forwards) = htlc_forwards {
- $self.forward_htlcs(&mut [forwards][..]);
- }
- } }
-}
-
impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
channel_state: Mutex::new(ChannelHolder{
by_id: HashMap::new(),
- claimable_htlcs: HashMap::new(),
pending_msg_events: Vec::new(),
}),
outbound_scid_aliases: Mutex::new(HashSet::new()),
pending_inbound_payments: Mutex::new(HashMap::new()),
pending_outbound_payments: Mutex::new(HashMap::new()),
forward_htlcs: Mutex::new(HashMap::new()),
+ claimable_htlcs: Mutex::new(HashMap::new()),
id_to_peer: Mutex::new(HashMap::new()),
short_to_chan_info: FairRwLock::new(HashMap::new()),
///
/// `user_channel_id` will be provided back as in
/// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events
- /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to 0
- /// for inbound channels, so you may wish to avoid using 0 for `user_channel_id` here.
- /// `user_channel_id` has no meaning inside of LDK, it is simply copied to events and otherwise
- /// ignored.
+ /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to a
+ /// randomized value for inbound channels. `user_channel_id` has no meaning inside of LDK, it
+ /// is simply copied to events and otherwise ignored.
///
/// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
/// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
/// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
/// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
/// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
- pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u64, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
+ pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
if channel_value_satoshis < 1000 {
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
let mut res = Vec::new();
{
let channel_state = self.channel_state.lock().unwrap();
+ let best_block_height = self.best_block.read().unwrap().height();
res.reserve(channel_state.by_id.len());
for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
let balance = channel.get_available_balances();
next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
user_channel_id: channel.get_user_id(),
confirmations_required: channel.minimum_depth(),
+ confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
is_outbound: channel.is_outbound(),
is_channel_ready: channel.is_usable(),
if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
}
- let per_peer_state = self.per_peer_state.read().unwrap();
- let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
- Some(peer_state) => {
- let peer_state = peer_state.lock().unwrap();
- let their_features = &peer_state.latest_features;
- chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
- },
- None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ let (shutdown_msg, monitor_update, htlcs) = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ match per_peer_state.get(&counterparty_node_id) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ }
};
failed_htlcs = htlcs;
}
let routing = match hop_data.format {
- msgs::OnionHopDataFormat::Legacy { .. } => {
- return Err(ReceiveError {
- err_code: 0x4000|0x2000|3,
- err_data: Vec::new(),
- msg: "We require payment_secrets",
- });
- },
msgs::OnionHopDataFormat::NonFinalNode { .. } => {
return Err(ReceiveError {
err_code: 0x4000|22,
routing,
payment_hash,
incoming_shared_secret: shared_secret,
- amt_to_forward: amt_msat,
+ incoming_amt_msat: Some(amt_msat),
+ outgoing_amt_msat: amt_msat,
outgoing_cltv_value: hop_data.outgoing_cltv_value,
})
}
};
let short_channel_id = match next_hop_data.format {
- msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
msgs::OnionHopDataFormat::FinalNode { .. } => {
return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
},
payment_hash: msg.payment_hash.clone(),
incoming_shared_secret: shared_secret,
- amt_to_forward: next_hop_data.amt_to_forward,
+ incoming_amt_msat: Some(msg.amount_msat),
+ outgoing_amt_msat: next_hop_data.amt_to_forward,
outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
})
}
};
- if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
+ if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info {
// If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
// with a short_channel_id of 0. This is important as various things later assume
// short_channel_id is non-0 in any ::Forward.
None => { // unknown_next_peer
// Note that this is likely a timing oracle for detecting whether an scid is a
// phantom.
- if fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id) {
+ if fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash) {
None
} else {
break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
if !chan.is_live() { // channel_disabled
break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, chan_update_opt));
}
- if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+ if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
}
- if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *amt_to_forward, *outgoing_cltv_value) {
+ if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
break Some((err, code, chan_update_opt));
}
chan_update_opt
let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
match pending_outbounds.entry(payment_id) {
- hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::ParameterError(APIError::RouteError {
- err: "Payment already in progress"
- })),
+ hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment),
hash_map::Entry::Vacant(entry) => {
let payment = entry.insert(PendingOutboundPayment::Retryable {
session_privs: HashSet::new(),
// `pending_outbound_payments` map, as the user isn't expected to `abandon_payment`.
let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some();
debug_assert!(removed, "We should always have a pending payment to remove here");
- Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
+ Err(PaymentSendFailure::AllFailedResendSafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
} else {
Ok(())
}
let mut new_events = Vec::new();
let mut failed_forwards = Vec::new();
- let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
+ let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
let mut handle_errors = Vec::new();
{
let mut forward_htlcs = HashMap::new();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for (short_chan_id, mut pending_forwards) in forward_htlcs {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
if short_chan_id != 0 {
macro_rules! forwarding_channel_not_found {
() => {
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
- routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
- prev_funding_outpoint } => {
- macro_rules! failure_handler {
- ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- phantom_shared_secret: $phantom_ss,
- });
-
- let reason = if $next_hop_unknown {
- HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
- } else {
- HTLCDestination::FailedPayment{ payment_hash }
- };
-
- failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
- reason
- ));
- continue;
- }
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
+ forward_info: PendingHTLCInfo {
+ routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
+ outgoing_cltv_value, incoming_amt_msat: _
+ }
+ }) => {
+ macro_rules! failure_handler {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
+ log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ phantom_shared_secret: $phantom_ss,
+ });
+
+ let reason = if $next_hop_unknown {
+ HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
+ } else {
+ HTLCDestination::FailedPayment{ payment_hash }
+ };
+
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
+ reason
+ ));
+ continue;
}
- macro_rules! fail_forward {
- ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
- {
- failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
- }
+ }
+ macro_rules! fail_forward {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+ {
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
}
}
- macro_rules! failed_payment {
- ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
- {
- failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
- }
+ }
+ macro_rules! failed_payment {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+ {
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
}
}
- if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
- let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
- if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
- let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
- let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
- Ok(res) => res,
- Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
- let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
- // In this scenario, the phantom would have sent us an
- // `update_fail_malformed_htlc`, meaning here we encrypt the error as
- // if it came from us (the second-to-last hop) but contains the sha256
- // of the onion.
- failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
- },
- Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
- failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
- },
- };
- match next_hop {
- onion_utils::Hop::Receive(hop_data) => {
- match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
- Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
- Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
- }
- },
- _ => panic!(),
- }
- } else {
- fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
+ }
+ if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
+ let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
+ if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
+ let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
+ let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+ Ok(res) => res,
+ Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
+ let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
+ // In this scenario, the phantom would have sent us an
+ // `update_fail_malformed_htlc`, meaning here we encrypt the error as
+ // if it came from us (the second-to-last hop) but contains the sha256
+ // of the onion.
+ failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
+ },
+ Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
+ failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
+ },
+ };
+ match next_hop {
+ onion_utils::Hop::Receive(hop_data) => {
+ match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret)) {
+ Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
+ Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
+ }
+ },
+ _ => panic!(),
}
} else {
fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
}
- },
+ } else {
+ fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
+ }
+ },
HTLCForwardInfo::FailHTLC { .. } => {
// Channel went away before we could fail it. This implies
// the channel is now on chain and our counterparty is
continue;
}
};
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(forward_chan_id) {
hash_map::Entry::Vacant(_) => {
forwarding_channel_not_found!();
let mut fail_htlc_msgs = Vec::new();
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
- routing: PendingHTLCRouting::Forward {
- onion_packet, ..
- }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
- prev_funding_outpoint } => {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _,
+ forward_info: PendingHTLCInfo {
+ incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+ routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _,
+ },
+ }) => {
log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
// Phantom payments are only PendingHTLCRouting::Receive.
phantom_shared_secret: None,
});
- match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
+ match chan.get_mut().send_htlc(outgoing_amt_msat, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
Err(e) => {
if let ChannelError::Ignore(msg) = e {
log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
} else {
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
- routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
- prev_funding_outpoint } => {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
+ forward_info: PendingHTLCInfo {
+ routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, ..
+ }
+ }) => {
let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
let _legacy_hop_data = Some(payment_data.clone());
incoming_packet_shared_secret: incoming_shared_secret,
phantom_shared_secret,
},
- value: amt_to_forward,
+ value: outgoing_amt_msat,
timer_ticks: 0,
- total_msat: if let Some(data) = &payment_data { data.total_msat } else { amt_to_forward },
+ total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
cltv_expiry,
onion_payload,
};
));
}
}
+ let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
+ let mut receiver_node_id = self.our_network_pubkey;
+ if phantom_shared_secret.is_some() {
+ receiver_node_id = self.keys_manager.get_node_id(Recipient::PhantomNode)
+ .expect("Failed to get node_id for phantom node recipient");
+ }
macro_rules! check_total_value {
($payment_data: expr, $payment_preimage: expr) => {{
payment_secret: $payment_data.payment_secret,
}
};
- let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+ let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+ let (_, htlcs) = claimable_htlcs.entry(payment_hash)
.or_insert_with(|| (purpose(), Vec::new()));
if htlcs.len() == 1 {
if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
fail_htlc!(claimable_htlc, payment_hash);
} else if total_value == $payment_data.total_msat {
+ let prev_channel_id = prev_funding_outpoint.to_channel_id();
htlcs.push(claimable_htlc);
new_events.push(events::Event::PaymentReceived {
+ receiver_node_id: Some(receiver_node_id),
payment_hash,
purpose: purpose(),
amount_msat: total_value,
+ via_channel_id: Some(prev_channel_id),
+ via_user_channel_id: Some(prev_user_channel_id),
});
payment_received_generated = true;
} else {
check_total_value!(payment_data, payment_preimage);
},
OnionPayload::Spontaneous(preimage) => {
- match channel_state.claimable_htlcs.entry(payment_hash) {
+ match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
e.insert((purpose.clone(), vec![claimable_htlc]));
+ let prev_channel_id = prev_funding_outpoint.to_channel_id();
new_events.push(events::Event::PaymentReceived {
+ receiver_node_id: Some(receiver_node_id),
payment_hash,
- amount_msat: amt_to_forward,
+ amount_msat: outgoing_amt_msat,
purpose,
+ via_channel_id: Some(prev_channel_id),
+ via_user_channel_id: Some(prev_user_channel_id),
});
},
hash_map::Entry::Occupied(_) => {
true
});
+ }
- channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- if htlcs.is_empty() {
- // This should be unreachable
- debug_assert!(false);
+ self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ if htlcs.is_empty() {
+ // This should be unreachable
+ debug_assert!(false);
+ return false;
+ }
+ if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
+ // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+ // In this case we're not going to handle any timeouts of the parts here.
+ if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+ return true;
+ } else if htlcs.into_iter().any(|htlc| {
+ htlc.timer_ticks += 1;
+ return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+ }) {
+ timed_out_mpp_htlcs.extend(htlcs.drain(..).map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
return false;
}
- if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
- // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
- // In this case we're not going to handle any timeouts of the parts here.
- if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
- return true;
- } else if htlcs.into_iter().any(|htlc| {
- htlc.timer_ticks += 1;
- return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
- }) {
- timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
- return false;
- }
- }
- true
- });
- }
+ }
+ true
+ });
for htlc_source in timed_out_mpp_htlcs.drain(..) {
let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let removed_source = {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.claimable_htlcs.remove(payment_hash)
- };
+ let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
if let Some((_, mut sources)) = removed_source {
for htlc in sources.drain(..) {
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
+ let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
if let Some((payment_purpose, mut sources)) = removed_source {
assert!(!sources.is_empty());
let mut claimed_any_htlcs = false;
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
+ let mut receiver_node_id = Some(self.our_network_pubkey);
for htlc in sources.iter() {
let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
Some((_cp_id, chan_id)) => chan_id.clone(),
break;
}
}
+ let phantom_shared_secret = htlc.prev_hop.phantom_shared_secret;
+ if phantom_shared_secret.is_some() {
+ let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode)
+ .expect("Failed to get node_id for phantom node recipient");
+ receiver_node_id = Some(phantom_pubkey)
+ }
claimable_amt_msat += htlc.value;
}
if claimed_any_htlcs {
self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+ receiver_node_id,
payment_hash,
purpose: payment_purpose,
amount_msat: claimable_amt_msat,
fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
- let channel_state = &mut **channel_state_lock;
- let chan_id = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
- Some((_cp_id, chan_id)) => chan_id.clone(),
- None => {
- return ClaimFundsFromHop::PrevHopForceClosed
- }
- };
+ let chan_id = prev_hop.outpoint.to_channel_id();
+ let channel_state = &mut **channel_state_lock;
if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
Ok(msgs_monitor_option) => {
self.our_network_pubkey.clone()
}
+ /// Handles a channel reentering a functional state, either due to reconnect or a monitor
+ /// update completion.
+ fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
+ channel: &mut Channel<<K::Target as KeysInterface>::Signer>, raa: Option<msgs::RevokeAndACK>,
+ commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
+ pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+ channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
+ -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+ let mut htlc_forwards = None;
+
+ let counterparty_node_id = channel.get_counterparty_node_id();
+ if !pending_forwards.is_empty() {
+ htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
+ channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
+ }
+
+ if let Some(msg) = channel_ready {
+ send_channel_ready!(self, pending_msg_events, channel, msg);
+ }
+ if let Some(msg) = announcement_sigs {
+ pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: counterparty_node_id,
+ msg,
+ });
+ }
+
+ emit_channel_ready_event!(self, channel);
+
+ macro_rules! handle_cs { () => {
+ if let Some(update) = commitment_update {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: counterparty_node_id,
+ updates: update,
+ });
+ }
+ } }
+ macro_rules! handle_raa { () => {
+ if let Some(revoke_and_ack) = raa {
+ pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: counterparty_node_id,
+ msg: revoke_and_ack,
+ });
+ }
+ } }
+ match order {
+ RAACommitmentOrder::CommitmentFirst => {
+ handle_cs!();
+ handle_raa!();
+ },
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ handle_raa!();
+ handle_cs!();
+ },
+ }
+
+ if let Some(tx) = funding_broadcastable {
+ log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+ self.tx_broadcaster.broadcast_transaction(&tx);
+ }
+
+ htlc_forwards
+ }
+
fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let chan_restoration_res;
+ let htlc_forwards;
let (mut pending_failures, finalized_claims, counterparty_node_id) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
})
} else { None }
} else { None };
- chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+ htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
(updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
};
- post_handle_chan_restoration!(self, chan_restoration_res);
+ if let Some(forwards) = htlc_forwards {
+ self.forward_htlcs(&mut [forwards][..]);
+ }
self.finalize_claims(finalized_claims);
for failure in pending_failures.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
///
/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
- pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+ pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id)
}
///
/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
- pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+ pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id)
}
- fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u64) -> Result<(), APIError> {
+ fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state_lock = self.channel_state.lock().unwrap();
return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
}
+ let mut random_bytes = [0u8; 16];
+ random_bytes.copy_from_slice(&self.keys_manager.get_secure_random_bytes()[..16]);
+ let user_channel_id = u128::from_be_bytes(random_bytes);
+
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.keys_manager,
- counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration,
+ counterparty_node_id.clone(), &their_features, msg, user_channel_id, &self.default_configuration,
self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
{
Err(e) => {
}
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
node_id: counterparty_node_id.clone(),
- msg: channel.accept_inbound_channel(0),
+ msg: channel.accept_inbound_channel(user_channel_id),
});
} else {
let mut pending_events = self.pending_events.lock().unwrap();
}
#[inline]
- fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)]) {
- for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+ for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut forward_event = None;
if !pending_forwards.is_empty() {
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
PendingHTLCRouting::ReceiveKeysend { .. } => 0,
}) {
hash_map::Entry::Occupied(mut entry) => {
- entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
- prev_htlc_id, forward_info });
+ entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
},
hash_map::Entry::Vacant(entry) => {
- entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
- prev_htlc_id, forward_info }));
+ entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
}
}
}
raa_updates.finalized_claimed_htlcs,
chan.get().get_short_channel_id()
.unwrap_or(chan.get().outbound_scid_alias()),
- chan.get().get_funding_txo().unwrap()))
+ chan.get().get_funding_txo().unwrap(),
+ chan.get().get_user_id()))
},
hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
match res {
Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
- short_channel_id, channel_outpoint)) =>
+ short_channel_id, channel_outpoint, user_channel_id)) =>
{
for failure in pending_failures.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
}
- self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
+ self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, user_channel_id, pending_forwards)]);
self.finalize_claims(finalized_claim_htlcs);
Ok(())
},
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), chan);
+ try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
- let chan_restoration_res;
- let (htlcs_failed_forward, need_lnd_workaround) = {
+ let htlc_forwards;
+ let need_lnd_workaround = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
}
}
let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
- chan_restoration_res = handle_chan_restoration_locked!(
- self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
- responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ htlc_forwards = self.handle_channel_resumption(
+ &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
+ Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
- (responses.holding_cell_failed_htlcs, need_lnd_workaround)
+ need_lnd_workaround
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- post_handle_chan_restoration!(self, chan_restoration_res);
- self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
+
+ if let Some(forwards) = htlc_forwards {
+ self.forward_htlcs(&mut [forwards][..]);
+ }
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
}
}
+ /// Gets inflight HTLC information by processing pending outbound payments that are in
+ /// our channels. May be used during pathfinding to account for in-use channel liquidity.
+ pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
+ let mut inflight_htlcs = InFlightHtlcs::new();
+
+ for chan in self.channel_state.lock().unwrap().by_id.values() {
+ for htlc_source in chan.inflight_htlc_sources() {
+ if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
+ inflight_htlcs.process_path(path, self.get_our_node_id());
+ }
+ }
+ }
+
+ inflight_htlcs
+ }
+
#[cfg(any(test, fuzzing, feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
let events = core::cell::RefCell::new(Vec::new());
- let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
+ let event_handler = |event: events::Event| events.borrow_mut().push(event);
self.process_pending_events(&event_handler);
events.into_inner()
}
pub fn clear_pending_payments(&self) {
self.pending_outbound_payments.lock().unwrap().clear()
}
+
+ /// Processes any events asynchronously in the order they were generated since the last call
+ /// using the given event handler.
+ ///
+ /// See the trait-level documentation of [`EventsProvider`] for requirements.
+ pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
+ &self, handler: H
+ ) {
+ // We'll acquire our total consistency lock until the returned future completes so that
+ // we can be sure no other persists happen while processing events.
+ let _read_guard = self.total_consistency_lock.read().unwrap();
+
+ let mut result = NotifyOption::SkipPersist;
+
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ if !pending_events.is_empty() {
+ result = NotifyOption::DoPersist;
+ }
+
+ for event in pending_events {
+ handler(event).await;
+ }
+
+ if result == NotifyOption::DoPersist {
+ self.persistence_notifier.notify();
+ }
+ }
}
impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, K, F, L>
result = NotifyOption::DoPersist;
}
- let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
if !pending_events.is_empty() {
result = NotifyOption::DoPersist;
}
- for event in pending_events.drain(..) {
- handler.handle_event(&event);
+ for event in pending_events {
+ handler.handle_event(event);
}
result
});
}
- fn get_relevant_txids(&self) -> Vec<Txid> {
+ fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
let channel_state = self.channel_state.lock().unwrap();
let mut res = Vec::with_capacity(channel_state.by_id.len());
for chan in channel_state.by_id.values() {
- if let Some(funding_txo) = chan.get_funding_txo() {
- res.push(funding_txo.txid);
+ if let (Some(funding_txo), block_hash) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+ res.push((funding_txo.txid, block_hash));
}
}
res
}
true
});
+ }
- if let Some(height) = height_opt {
- channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- htlcs.retain(|htlc| {
- // If height is approaching the number of blocks we think it takes us to get
- // our commitment transaction confirmed before the HTLC expires, plus the
- // number of blocks we generally consider it to take to do a commitment update,
- // just give up on it and fail the HTLC.
- if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
-
- timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
- failure_code: 0x4000 | 15,
- data: htlc_msat_height_data
- }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
- false
- } else { true }
- });
- !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ if let Some(height) = height_opt {
+ self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ htlcs.retain(|htlc| {
+ // If height is approaching the number of blocks we think it takes us to get
+ // our commitment transaction confirmed before the HTLC expires, plus the
+ // number of blocks we generally consider it to take to do a commitment update,
+ // just give up on it and fail the HTLC.
+ if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
+ timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+ failure_code: 0x4000 | 15,
+ data: htlc_msat_height_data
+ }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+ false
+ } else { true }
});
- }
+ !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ });
}
self.handle_init_event_channel_failures(failed_channels);
/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
/// indicating whether persistence is necessary. Only one listener on
- /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
- /// up.
+ /// [`await_persistable_update`], [`await_persistable_update_timeout`], or a future returned by
+ /// [`get_persistable_update_future`] is guaranteed to be woken up.
///
/// Note that this method is not available with the `no-std` feature.
+ ///
+ /// [`await_persistable_update`]: Self::await_persistable_update
+ /// [`await_persistable_update_timeout`]: Self::await_persistable_update_timeout
+ /// [`get_persistable_update_future`]: Self::get_persistable_update_future
#[cfg(any(test, feature = "std"))]
pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
self.persistence_notifier.wait_timeout(max_wait)
}
/// Blocks until ChannelManager needs to be persisted. Only one listener on
- /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
- /// up.
+ /// [`await_persistable_update`], `await_persistable_update_timeout`, or a future returned by
+ /// [`get_persistable_update_future`] is guaranteed to be woken up.
+ ///
+ /// [`await_persistable_update`]: Self::await_persistable_update
+ /// [`get_persistable_update_future`]: Self::get_persistable_update_future
pub fn await_persistable_update(&self) {
self.persistence_notifier.wait()
}
}
}
-impl<M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
+impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref >
ChannelMessageHandler for ChannelManager<M, T, K, F, L>
where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
(11, outbound_htlc_maximum_msat, option),
});
-impl_writeable_tlv_based!(ChannelDetails, {
- (1, inbound_scid_alias, option),
- (2, channel_id, required),
- (3, channel_type, option),
- (4, counterparty, required),
- (5, outbound_scid_alias, option),
- (6, funding_txo, option),
- (7, config, option),
- (8, short_channel_id, option),
- (10, channel_value_satoshis, required),
- (12, unspendable_punishment_reserve, option),
- (14, user_channel_id, required),
- (16, balance_msat, required),
- (18, outbound_capacity_msat, required),
- // Note that by the time we get past the required read above, outbound_capacity_msat will be
- // filled in, so we can safely unwrap it here.
- (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
- (20, inbound_capacity_msat, required),
- (22, confirmations_required, option),
- (24, force_close_spend_delay, option),
- (26, is_outbound, required),
- (28, is_channel_ready, required),
- (30, is_usable, required),
- (32, is_public, required),
- (33, inbound_htlc_minimum_msat, option),
- (35, inbound_htlc_maximum_msat, option),
-});
+impl Writeable for ChannelDetails {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
+ // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
+ let user_channel_id_low = self.user_channel_id as u64;
+ let user_channel_id_high_opt = Some((self.user_channel_id >> 64) as u64);
+ write_tlv_fields!(writer, {
+ (1, self.inbound_scid_alias, option),
+ (2, self.channel_id, required),
+ (3, self.channel_type, option),
+ (4, self.counterparty, required),
+ (5, self.outbound_scid_alias, option),
+ (6, self.funding_txo, option),
+ (7, self.config, option),
+ (8, self.short_channel_id, option),
+ (9, self.confirmations, option),
+ (10, self.channel_value_satoshis, required),
+ (12, self.unspendable_punishment_reserve, option),
+ (14, user_channel_id_low, required),
+ (16, self.balance_msat, required),
+ (18, self.outbound_capacity_msat, required),
+			// NOTE(review): this comment block was copied from the read path. On the write
+			// side the `(default_value, ...)` expression below is presumably never evaluated
+			// (`(default_value, ...)` fields serialize like `required`) — confirm against the
+			// `_encode_tlv` macro before relying on `outbound_capacity_msat.0.unwrap()` here.
+ (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+ (20, self.inbound_capacity_msat, required),
+ (22, self.confirmations_required, option),
+ (24, self.force_close_spend_delay, option),
+ (26, self.is_outbound, required),
+ (28, self.is_channel_ready, required),
+ (30, self.is_usable, required),
+ (32, self.is_public, required),
+ (33, self.inbound_htlc_minimum_msat, option),
+ (35, self.inbound_htlc_maximum_msat, option),
+ (37, user_channel_id_high_opt, option),
+ });
+ Ok(())
+ }
+}
+
+impl Readable for ChannelDetails {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ init_and_read_tlv_fields!(reader, {
+ (1, inbound_scid_alias, option),
+ (2, channel_id, required),
+ (3, channel_type, option),
+ (4, counterparty, required),
+ (5, outbound_scid_alias, option),
+ (6, funding_txo, option),
+ (7, config, option),
+ (8, short_channel_id, option),
+ (9, confirmations, option),
+ (10, channel_value_satoshis, required),
+ (12, unspendable_punishment_reserve, option),
+ (14, user_channel_id_low, required),
+ (16, balance_msat, required),
+ (18, outbound_capacity_msat, required),
+ // Note that by the time we get past the required read above, outbound_capacity_msat will be
+ // filled in, so we can safely unwrap it here.
+ (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+ (20, inbound_capacity_msat, required),
+ (22, confirmations_required, option),
+ (24, force_close_spend_delay, option),
+ (26, is_outbound, required),
+ (28, is_channel_ready, required),
+ (30, is_usable, required),
+ (32, is_public, required),
+ (33, inbound_htlc_minimum_msat, option),
+ (35, inbound_htlc_maximum_msat, option),
+ (37, user_channel_id_high_opt, option),
+ });
+
+ // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
+ // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
+ let user_channel_id_low: u64 = user_channel_id_low.0.unwrap();
+ let user_channel_id = user_channel_id_low as u128 +
+ ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
+
+ Ok(Self {
+ inbound_scid_alias,
+ channel_id: channel_id.0.unwrap(),
+ channel_type,
+ counterparty: counterparty.0.unwrap(),
+ outbound_scid_alias,
+ funding_txo,
+ config,
+ short_channel_id,
+ channel_value_satoshis: channel_value_satoshis.0.unwrap(),
+ unspendable_punishment_reserve,
+ user_channel_id,
+ balance_msat: balance_msat.0.unwrap(),
+ outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
+ next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+ inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
+ confirmations_required,
+ confirmations,
+ force_close_spend_delay,
+ is_outbound: is_outbound.0.unwrap(),
+ is_channel_ready: is_channel_ready.0.unwrap(),
+ is_usable: is_usable.0.unwrap(),
+ is_public: is_public.0.unwrap(),
+ inbound_htlc_minimum_msat,
+ inbound_htlc_maximum_msat,
+ })
+ }
+}
impl_writeable_tlv_based!(PhantomRouteHints, {
(2, channels, vec_type),
(0, routing, required),
(2, incoming_shared_secret, required),
(4, payment_hash, required),
- (6, amt_to_forward, required),
- (8, outgoing_cltv_value, required)
+ (6, outgoing_amt_msat, required),
+ (8, outgoing_cltv_value, required),
+ (9, incoming_amt_msat, option),
});
(1, payment_id_opt, option),
(2, first_hop_htlc_msat, required),
(3, payment_secret, option),
- (4, path, vec_type),
+ (4, *path, vec_type),
(5, payment_params, option),
});
}
},
;);
+impl_writeable_tlv_based!(PendingAddHTLCInfo, {
+ (0, forward_info, required),
+ (1, prev_user_channel_id, (default_value, 0)),
+ (2, prev_short_channel_id, required),
+ (4, prev_htlc_id, required),
+ (6, prev_funding_outpoint, required),
+});
+
impl_writeable_tlv_based_enum!(HTLCForwardInfo,
- (0, AddHTLC) => {
- (0, forward_info, required),
- (2, prev_short_channel_id, required),
- (4, prev_htlc_id, required),
- (6, prev_funding_outpoint, required),
- },
(1, FailHTLC) => {
(0, htlc_id, required),
(2, err_packet, required),
- },
-;);
+ };
+ (0, AddHTLC)
+);
impl_writeable_tlv_based!(PendingInboundPayment, {
(0, payment_secret, required),
}
}
- let channel_state = self.channel_state.lock().unwrap();
+ let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
+ let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+ let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
- (channel_state.claimable_htlcs.len() as u64).write(writer)?;
- for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
+ (claimable_htlcs.len() as u64).write(writer)?;
+ for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
payment_hash.write(writer)?;
(previous_hops.len() as u64).write(writer)?;
for htlc in previous_hops.iter() {
peer_state.latest_features.write(writer)?;
}
- let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
- let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
let events = self.pending_events.lock().unwrap();
(events.len() as u64).write(writer)?;
for event in events.iter() {
/// which you've already broadcasted the transaction.
///
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
-pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- where M::Target: chain::Watch<Signer>,
+pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
/// this struct.
///
/// (C-not exported) because we have no HashMap bindings
- pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<Signer>>,
+ pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<K::Target as KeysInterface>::Signer>>,
}
-impl<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
+impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ ChannelManagerReadArgs<'a, M, T, K, F, L>
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
/// HashMap for you. This is primarily useful for C bindings where it is not practical to
/// populate a HashMap directly from C.
pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig,
- mut channel_monitors: Vec<&'a mut ChannelMonitor<Signer>>) -> Self {
+ mut channel_monitors: Vec<&'a mut ChannelMonitor<<K::Target as KeysInterface>::Signer>>) -> Self {
Self {
keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, default_config,
channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SipmleArcChannelManager type:
impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<M, T, K, F, L>>)
+ ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<M, T, K, F, L>>)
where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, M, T, K, F, L>) -> Result<Self, DecodeError> {
let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, K, F, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
}
impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>> for (BlockHash, ChannelManager<M, T, K, F, L>)
+ ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, L>> for (BlockHash, ChannelManager<M, T, K, F, L>)
where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, M, T, K, F, L>) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let genesis_hash: BlockHash = Readable::read(reader)?;
if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
let mut claimable_amt_msat = 0;
+ let mut receiver_node_id = Some(our_network_pubkey);
+ let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret;
+ if phantom_shared_secret.is_some() {
+ let phantom_pubkey = args.keys_manager.get_node_id(Recipient::PhantomNode)
+ .expect("Failed to get node_id for phantom node recipient");
+ receiver_node_id = Some(phantom_pubkey)
+ }
for claimable_htlc in claimable_htlcs {
claimable_amt_msat += claimable_htlc.value;
}
}
pending_events_read.push(events::Event::PaymentClaimed {
+ receiver_node_id,
payment_hash,
purpose: payment_purpose,
amount_msat: claimable_amt_msat,
channel_state: Mutex::new(ChannelHolder {
by_id,
- claimable_htlcs,
pending_msg_events: Vec::new(),
}),
inbound_payment_key: expanded_inbound_key,
pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
+ claimable_htlcs: Mutex::new(claimable_htlcs),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
id_to_peer: Mutex::new(id_to_peer),
short_to_chan_info: FairRwLock::new(short_to_chan_info),
// Byte 2
BasicMPP,
]);
+ define_context!(OfferContext, []);
// This isn't a "real" feature context, and is only used in the channel_type field in an
// `OpenChannel` message.
define_context!(ChannelTypeContext, [
supports_keysend, requires_keysend);
#[cfg(test)]
- define_feature!(123456789, UnknownFeature, [NodeContext, ChannelContext, InvoiceContext],
+ define_feature!(123456789, UnknownFeature, [NodeContext, ChannelContext, InvoiceContext, OfferContext],
"Feature flags for an unknown feature used in testing.", set_unknown_feature_optional,
set_unknown_feature_required, supports_unknown_test_feature, requires_unknown_test_feature);
}
pub type ChannelFeatures = Features<sealed::ChannelContext>;
/// Features used within an invoice.
pub type InvoiceFeatures = Features<sealed::InvoiceContext>;
+/// Features used within an offer.
+pub type OfferFeatures = Features<sealed::OfferContext>;
/// Features used within the channel_type field in an OpenChannel message.
///
}
}
+#[cfg(test)]
+impl<T: sealed::UnknownFeature> Features<T> {
+ pub(crate) fn unknown() -> Self {
+ let mut features = Self::empty();
+ features.set_unknown_feature_required();
+ features
+ }
+}
+
macro_rules! impl_feature_len_prefixed_write {
($features: ident) => {
impl Writeable for $features {
impl_feature_len_prefixed_write!(NodeFeatures);
impl_feature_len_prefixed_write!(InvoiceFeatures);
-// Because ChannelTypeFeatures only appears inside of TLVs, it doesn't have a length prefix when
-// serialized. Thus, we can't use `impl_feature_len_prefixed_write`, above, and have to write our
-// own serialization.
-impl Writeable for ChannelTypeFeatures {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
- self.write_be(w)
- }
-}
-impl Readable for ChannelTypeFeatures {
- fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
- let v = io_extras::read_to_end(r)?;
- Ok(Self::from_be_bytes(v))
+// Some features only appear inside of TLVs, so they don't have a length prefix when serialized.
+macro_rules! impl_feature_tlv_write {
+ ($features: ident) => {
+ impl Writeable for $features {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.write_be(w)
+ }
+ }
+ impl Readable for $features {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let v = io_extras::read_to_end(r)?;
+ Ok(Self::from_be_bytes(v))
+ }
+ }
}
}
+impl_feature_tlv_write!(ChannelTypeFeatures);
+impl_feature_tlv_write!(OfferFeatures);
+
#[cfg(test)]
mod tests {
use super::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, InvoiceFeatures, NodeFeatures, sealed};
/// The same as `TransactionsFirst`, however when we have multiple blocks to connect, we only
/// make a single `best_block_updated` call.
TransactionsFirstSkippingBlocks,
+ /// The same as `TransactionsFirst`, however when we have multiple blocks to connect, we only
+ /// make a single `best_block_updated` call. Further, we call `transactions_confirmed` multiple
+ /// times to ensure it's idempotent.
+ TransactionsDuplicativelyFirstSkippingBlocks,
+ /// The same as `TransactionsFirst`, however when we have multiple blocks to connect, we only
+ /// make a single `best_block_updated` call. Further, we re-confirm every transaction we've
+ /// ever seen on each new connection to ensure `transactions_confirmed` is *really* idempotent.
+ HighlyRedundantTransactionsFirstSkippingBlocks,
/// The same as `TransactionsFirst` when connecting blocks. During disconnection only
/// `transaction_unconfirmed` is called.
TransactionsFirstReorgsOnlyTip,
use core::hash::{BuildHasher, Hasher};
// Get a random value using the only std API to do so - the DefaultHasher
let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
- let res = match rand_val % 7 {
+ let res = match rand_val % 9 {
0 => ConnectStyle::BestBlockFirst,
1 => ConnectStyle::BestBlockFirstSkippingBlocks,
2 => ConnectStyle::BestBlockFirstReorgsOnlyTip,
3 => ConnectStyle::TransactionsFirst,
4 => ConnectStyle::TransactionsFirstSkippingBlocks,
- 5 => ConnectStyle::TransactionsFirstReorgsOnlyTip,
- 6 => ConnectStyle::FullBlockViaListen,
+ 5 => ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks,
+ 6 => ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks,
+ 7 => ConnectStyle::TransactionsFirstReorgsOnlyTip,
+ 8 => ConnectStyle::FullBlockViaListen,
_ => unreachable!(),
};
eprintln!("Using Block Connection Style: {:?}", res);
pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) -> BlockHash {
let skip_intermediaries = match *node.connect_style.borrow() {
ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks|
+ ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks|ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks|
ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::TransactionsFirstReorgsOnlyTip => true,
_ => false,
};
node.node.best_block_updated(&block.header, height);
node.node.transactions_confirmed(&block.header, &txdata, height);
},
- ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks|ConnectStyle::TransactionsFirstReorgsOnlyTip => {
+ ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks|
+ ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks|ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks|
+ ConnectStyle::TransactionsFirstReorgsOnlyTip => {
+ if *node.connect_style.borrow() == ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks {
+ let mut connections = Vec::new();
+ for (block, height) in node.blocks.lock().unwrap().iter() {
+ if !block.txdata.is_empty() {
+ // Reconnect all transactions we've ever seen to ensure transaction connection
+ // is *really* idempotent. This is a somewhat likely deployment for some
+ // esplora implementations of chain sync which try to reduce state and
+ // complexity as much as possible.
+ //
+ // Sadly we have to clone the block here to maintain lockorder. In the
+ // future we should consider Arc'ing the blocks to avoid this.
+ connections.push((block.clone(), *height));
+ }
+ }
+ for (old_block, height) in connections {
+ node.chain_monitor.chain_monitor.transactions_confirmed(&old_block.header,
+ &old_block.txdata.iter().enumerate().collect::<Vec<_>>(), height);
+ }
+ }
node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
+ if *node.connect_style.borrow() == ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks {
+ node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
+ }
call_claimable_balances(node);
node.chain_monitor.chain_monitor.best_block_updated(&block.header, height);
node.node.transactions_confirmed(&block.header, &txdata, height);
node.chain_monitor.chain_monitor.block_disconnected(&orig.0.header, orig.1);
Listen::block_disconnected(node.node, &orig.0.header, orig.1);
},
- ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => {
+ ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks|
+ ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks|ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks => {
if i == count - 1 {
node.chain_monitor.chain_monitor.best_block_updated(&prev.0.header, prev.1);
node.node.best_block_updated(&prev.0.header, prev.1);
pub struct Node<'a, 'b: 'a, 'c: 'b> {
pub chain_source: &'c test_utils::TestChainSource,
pub tx_broadcaster: &'c test_utils::TestBroadcaster,
+ pub fee_estimator: &'c test_utils::TestFeeEstimator,
pub chain_monitor: &'b test_utils::TestChainMonitor<'c>,
pub keys_manager: &'b test_utils::TestKeysInterface,
pub node: &'a ChannelManager<&'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>,
macro_rules! unwrap_send_err {
($res: expr, $all_failed: expr, $type: pat, $check: expr) => {
match &$res {
- &Err(PaymentSendFailure::AllFailedRetrySafe(ref fails)) if $all_failed => {
+ &Err(PaymentSendFailure::AllFailedResendSafe(ref fails)) if $all_failed => {
assert_eq!(fails.len(), 1);
match fails[0] {
$type => { $check },
}
}
-pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
+pub fn _reload_node<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, default_config: UserConfig, chanman_encoded: &[u8], monitors_encoded: &[&[u8]]) -> ChannelManager<&'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger> {
+ let mut monitors_read = Vec::with_capacity(monitors_encoded.len());
+ for encoded in monitors_encoded {
+ let mut monitor_read = &encoded[..];
+ let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>
+ ::read(&mut monitor_read, node.keys_manager).unwrap();
+ assert!(monitor_read.is_empty());
+ monitors_read.push(monitor);
+ }
+
+ let mut node_read = &chanman_encoded[..];
+ let (_, node_deserialized) = {
+ let mut channel_monitors = HashMap::new();
+ for monitor in monitors_read.iter_mut() {
+ assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
+ }
+ <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut node_read, ChannelManagerReadArgs {
+ default_config,
+ keys_manager: node.keys_manager,
+ fee_estimator: node.fee_estimator,
+ chain_monitor: node.chain_monitor,
+ tx_broadcaster: node.tx_broadcaster,
+ logger: node.logger,
+ channel_monitors,
+ }).unwrap()
+ };
+ assert!(node_read.is_empty());
+
+ for monitor in monitors_read.drain(..) {
+ assert_eq!(node.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+ ChannelMonitorUpdateStatus::Completed);
+ check_added_monitors!(node, 1);
+ }
+
+ node_deserialized
+}
+
+#[cfg(test)]
+macro_rules! reload_node {
+ ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
+ let chanman_encoded = $chanman_encoded;
+
+ $persister = test_utils::TestPersister::new();
+ $new_chain_monitor = test_utils::TestChainMonitor::new(Some($node.chain_source), $node.tx_broadcaster.clone(), $node.logger, $node.fee_estimator, &$persister, &$node.keys_manager);
+ $node.chain_monitor = &$new_chain_monitor;
+
+ $new_channelmanager = _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded);
+ $node.node = &$new_channelmanager;
+ };
+ ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
+ reload_node!($node, $crate::util::config::UserConfig::default(), $chanman_encoded, $monitors_encoded, $persister, $new_chain_monitor, $new_channelmanager);
+ };
+}
+
+pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128) -> ([u8; 32], Transaction, OutPoint) {
let chan_id = *node.network_chan_count.borrow();
let events = node.node.get_and_clear_pending_events();
let create_chan_id = node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap();
let open_channel_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id());
assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id);
+ assert_eq!(node_a.node.list_channels().iter().find(|channel| channel.channel_id == create_chan_id).unwrap().user_channel_id, 42);
node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &open_channel_msg);
let accept_channel_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id());
assert_eq!(accept_channel_msg.temporary_channel_id, create_chan_id);
node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &accept_channel_msg);
+ assert_ne!(node_b.node.list_channels().iter().find(|channel| channel.channel_id == create_chan_id).unwrap().user_channel_id, 0);
sign_funding_transaction(node_a, node_b, channel_value, create_chan_id)
}
}
}}
}
-
#[macro_export]
#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
macro_rules! expect_payment_received {
($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
- expect_payment_received!($node, $expected_payment_hash, $expected_payment_secret, $expected_recv_value, None)
+ expect_payment_received!($node, $expected_payment_hash, $expected_payment_secret, $expected_recv_value, None, $node.node.get_our_node_id())
};
- ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr, $expected_payment_preimage: expr) => {
+ ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr, $expected_payment_preimage: expr, $expected_receiver_node_id: expr) => {
let events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- $crate::util::events::Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ $crate::util::events::Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id: _, via_user_channel_id: _ } => {
assert_eq!($expected_payment_hash, *payment_hash);
assert_eq!($expected_recv_value, amount_msat);
+ assert_eq!($expected_receiver_node_id, receiver_node_id.unwrap());
match purpose {
$crate::util::events::PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert_eq!(&$expected_payment_preimage, payment_preimage);
if payment_received_expected {
assert_eq!(events_2.len(), 1);
match events_2[0] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_id, ref via_user_channel_id } => {
assert_eq!(our_payment_hash, *payment_hash);
+ assert_eq!(node.node.get_our_node_id(), receiver_node_id.unwrap());
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert_eq!(expected_preimage, *payment_preimage);
},
}
assert_eq!(amount_msat, recv_value);
+ assert!(node.node.list_channels().iter().any(|details| details.channel_id == via_channel_id.unwrap()));
+ assert!(node.node.list_channels().iter().any(|details| details.user_channel_id == via_user_channel_id.unwrap()));
},
_ => panic!("Unexpected event"),
}
let gossip_sync = P2PGossipSync::new(&cfgs[i].network_graph, None, cfgs[i].logger);
nodes.push(Node{
chain_source: cfgs[i].chain_source, tx_broadcaster: cfgs[i].tx_broadcaster,
+ fee_estimator: cfgs[i].fee_estimator,
chain_monitor: &cfgs[i].chain_monitor, keys_manager: &cfgs[i].keys_manager,
node: &chan_mgrs[i], network_graph: &cfgs[i].network_graph, gossip_sync,
node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
assert_eq!(*node_id, $dst_node.node.get_our_node_id());
}
+ let mut had_channel_update = false; // ChannelUpdate may be now or later, but not both
+ if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
+ assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+ idx += 1;
+ assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+ had_channel_update = true;
+ }
+
let mut revoke_and_ack = None;
let mut commitment_update = None;
let order = if let Some(ev) = msg_events.get(idx) {
assert_eq!(*node_id, $dst_node.node.get_our_node_id());
idx += 1;
assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+ assert!(!had_channel_update);
}
assert_eq!(msg_events.len(), idx);
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
-use crate::chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::chain::keysinterface::{BaseSign, KeysInterface};
use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
-use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
+use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{Channel, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use core::default::Default;
use core::iter::repeat;
use bitcoin::hashes::Hash;
-use crate::sync::{Arc, Mutex};
+use crate::sync::Mutex;
use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send + 1);
let err = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
match err {
- PaymentSendFailure::AllFailedRetrySafe(ref fails) => {
+ PaymentSendFailure::AllFailedResendSafe(ref fails) => {
match &fails[0] {
&APIError::ChannelUnavailable{ref err} =>
assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)),
let events = nodes[2].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
assert_eq!(our_payment_hash_21, *payment_hash);
assert_eq!(recv_value_21, amount_msat);
+ assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
+ assert_eq!(via_channel_id, Some(chan_2.2));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
_ => panic!("Unexpected event"),
}
match events[1] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
assert_eq!(our_payment_hash_22, *payment_hash);
assert_eq!(recv_value_22, amount_msat);
+ assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
+ assert_eq!(via_channel_id, Some(chan_2.2));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
- assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Sucess * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
+ assert!(node_txn.len() == 4 || node_txn.len() == 6); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
let commitment_spend =
if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
- check_spends!(node_txn[1], commitment_tx[0]);
- check_spends!(node_txn[2], commitment_tx[0]);
- assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
+ if node_txn.len() == 6 {
+ // In some block `ConnectStyle`s we may avoid broadcasting the double-spending
+ // transactions spending the HTLC outputs of C's commitment transaction. Otherwise,
+ // check that the extra broadcasts (double-)spend those here.
+ check_spends!(node_txn[1], commitment_tx[0]);
+ check_spends!(node_txn[2], commitment_tx[0]);
+ assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
+ }
&node_txn[0]
} else {
check_spends!(node_txn[0], commitment_tx[0]);
assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(commitment_spend.lock_time.0, 0);
assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
- check_spends!(node_txn[3], chan_1.3);
- assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71);
- check_spends!(node_txn[4], node_txn[3]);
- check_spends!(node_txn[5], node_txn[3]);
+ let funding_spend_offset = if node_txn.len() == 6 { 3 } else { 1 };
+ check_spends!(node_txn[funding_spend_offset], chan_1.3);
+ assert_eq!(node_txn[funding_spend_offset].input[0].witness.clone().last().unwrap().len(), 71);
+ check_spends!(node_txn[funding_spend_offset + 1], node_txn[funding_spend_offset]);
+ check_spends!(node_txn[funding_spend_offset + 2], node_txn[funding_spend_offset]);
// We don't bother to check that B can claim the HTLC output on its commitment tx here as
// we already checked the same situation with A.
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
+ // We rely on the ability to connect a block redundantly, which isn't allowed via
+ // `chain::Listen`, so we never run the test if we randomly get assigned that
+ // connect_style.
+ return;
+ }
create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
route_payment(&nodes[0], &[&nodes[1]], 10000000);
// Duplicate the connect_block call since this may happen due to other listeners
// registering new transactions
- header.prev_blockhash = header.block_hash();
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[2].clone()]});
}
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let mut as_channel_ready = None;
- if messages_delivered == 0 {
- let (channel_ready, _, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+ let channel_id = if messages_delivered == 0 {
+ let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
as_channel_ready = Some(channel_ready);
// nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
// Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
// it before the channel_reestablish message.
+ chan_id
} else {
- create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- }
+ create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2
+ };
let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
let events_2 = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events_2.len(), 1);
match events_2[0] {
- Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
assert_eq!(payment_hash_1, *payment_hash);
assert_eq!(amount_msat, 1_000_000);
+ assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
+ assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
do_test_drop_messages_peer_disconnect(6, false);
}
-#[test]
-fn test_funding_peer_disconnect() {
- // Test that we can lock in our funding tx while disconnected
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let persister: test_utils::TestPersister;
- let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
- let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- confirm_transaction(&nodes[0], &tx);
- let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
- assert!(events_1.is_empty());
-
- reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- confirm_transaction(&nodes[1], &tx);
- let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
- assert!(events_2.is_empty());
-
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
-
- // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
- nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
- let events_3 = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events_3.len(), 1);
- let as_channel_ready = match events_3[0] {
- MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
- assert_eq!(*node_id, nodes[1].node.get_our_node_id());
- msg.clone()
- },
- _ => panic!("Unexpected event {:?}", events_3[0]),
- };
-
- // nodes[1] received nodes[0]'s channel_ready on the first reconnect above, so it should send
- // announcement_signatures as well as channel_update.
- nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
- let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events_4.len(), 3);
- let chan_id;
- let bs_channel_ready = match events_4[0] {
- MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
- assert_eq!(*node_id, nodes[0].node.get_our_node_id());
- chan_id = msg.channel_id;
- msg.clone()
- },
- _ => panic!("Unexpected event {:?}", events_4[0]),
- };
- let bs_announcement_sigs = match events_4[1] {
- MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
- assert_eq!(*node_id, nodes[0].node.get_our_node_id());
- msg.clone()
- },
- _ => panic!("Unexpected event {:?}", events_4[1]),
- };
- match events_4[2] {
- MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => {
- assert_eq!(*node_id, nodes[0].node.get_our_node_id());
- },
- _ => panic!("Unexpected event {:?}", events_4[2]),
- }
-
- // Re-deliver nodes[0]'s channel_ready, which nodes[1] can safely ignore. It currently
- // generates a duplicative private channel_update
- nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
- let events_5 = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events_5.len(), 1);
- match events_5[0] {
- MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => {
- assert_eq!(*node_id, nodes[0].node.get_our_node_id());
- },
- _ => panic!("Unexpected event {:?}", events_5[0]),
- };
-
- // When we deliver nodes[1]'s channel_ready, however, nodes[0] will generate its
- // announcement_signatures.
- nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_channel_ready);
- let events_6 = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events_6.len(), 1);
- let as_announcement_sigs = match events_6[0] {
- MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
- assert_eq!(*node_id, nodes[1].node.get_our_node_id());
- msg.clone()
- },
- _ => panic!("Unexpected event {:?}", events_6[0]),
- };
- expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
- expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
-
- // When we deliver nodes[1]'s announcement_signatures to nodes[0], nodes[0] should immediately
- // broadcast the channel announcement globally, as well as re-send its (now-public)
- // channel_update.
- nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
- let events_7 = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events_7.len(), 1);
- let (chan_announcement, as_update) = match events_7[0] {
- MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
- (msg.clone(), update_msg.clone())
- },
- _ => panic!("Unexpected event {:?}", events_7[0]),
- };
-
- // Finally, deliver nodes[0]'s announcement_signatures to nodes[1] and make sure it creates the
- // same channel_announcement.
- nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs);
- let events_8 = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events_8.len(), 1);
- let bs_update = match events_8[0] {
- MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
- assert_eq!(*msg, chan_announcement);
- update_msg.clone()
- },
- _ => panic!("Unexpected event {:?}", events_8[0]),
- };
-
- // Provide the channel announcement and public updates to the network graph
- nodes[0].gossip_sync.handle_channel_announcement(&chan_announcement).unwrap();
- nodes[0].gossip_sync.handle_channel_update(&bs_update).unwrap();
- nodes[0].gossip_sync.handle_channel_update(&as_update).unwrap();
-
- let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
- let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
- claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
-
- // Check that after deserialization and reconnection we can still generate an identical
- // channel_announcement from the cached signatures.
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- let nodes_0_serialized = nodes[0].node.encode();
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let (_, nodes_0_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: UserConfig::default(),
- keys_manager,
- fee_estimator: node_cfgs[0].fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: nodes[0].logger,
- channel_monitors,
- }).unwrap()
- };
- nodes_0_deserialized = nodes_0_deserialized_tmp;
- assert!(nodes_0_read.is_empty());
-
- assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- nodes[0].node = &nodes_0_deserialized;
- check_added_monitors!(nodes[0], 1);
-
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-}
-
#[test]
fn test_channel_ready_without_best_block_updated() {
// Previously, if we were offline when a funding transaction was locked in, and then we came
do_test_holding_cell_htlc_add_timeouts(true);
}
-#[test]
-fn test_no_txn_manager_serialize_deserialize() {
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let logger: test_utils::TestLogger;
- let fee_estimator: test_utils::TestFeeEstimator;
- let persister: test_utils::TestPersister;
- let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
- let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
- let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- let nodes_0_serialized = nodes[0].node.encode();
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id())
- .write(&mut chan_0_monitor_serialized).unwrap();
-
- logger = test_utils::TestLogger::new();
- fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let config = UserConfig::default();
- let (_, nodes_0_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: config,
- keys_manager,
- fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: &logger,
- channel_monitors,
- }).unwrap()
- };
- nodes_0_deserialized = nodes_0_deserialized_tmp;
- assert!(nodes_0_read.is_empty());
-
- assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- nodes[0].node = &nodes_0_deserialized;
- assert_eq!(nodes[0].node.list_channels().len(), 1);
- check_added_monitors!(nodes[0], 1);
-
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
-
- nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-
- let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
- let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
- for node in nodes.iter() {
- assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
- node.gossip_sync.handle_channel_update(&as_update).unwrap();
- node.gossip_sync.handle_channel_update(&bs_update).unwrap();
- }
-
- send_payment(&nodes[0], &[&nodes[1]], 1000000);
-}
-
-#[test]
-fn test_manager_serialize_deserialize_events() {
- // This test makes sure the events field in ChannelManager survives de/serialization
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let fee_estimator: test_utils::TestFeeEstimator;
- let persister: test_utils::TestPersister;
- let logger: test_utils::TestLogger;
- let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
- let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
- // Start creating a channel, but stop right before broadcasting the funding transaction
- let channel_value = 100000;
- let push_msat = 10001;
- let a_flags = channelmanager::provided_init_features();
- let b_flags = channelmanager::provided_init_features();
- let node_a = nodes.remove(0);
- let node_b = nodes.remove(0);
- node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap();
- node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
- node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));
-
- let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, &node_b.node.get_our_node_id(), channel_value, 42);
-
- node_a.node.funding_transaction_generated(&temporary_channel_id, &node_b.node.get_our_node_id(), tx.clone()).unwrap();
- check_added_monitors!(node_a, 0);
-
- node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
- {
- let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
- assert_eq!(added_monitors.len(), 1);
- assert_eq!(added_monitors[0].0, funding_output);
- added_monitors.clear();
- }
-
- let bs_funding_signed = get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id());
- node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &bs_funding_signed);
- {
- let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
- assert_eq!(added_monitors.len(), 1);
- assert_eq!(added_monitors[0].0, funding_output);
- added_monitors.clear();
- }
- // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
-
- nodes.push(node_a);
- nodes.push(node_b);
-
-	// Start the de/serialization process mid-channel creation to check that the channel manager will hold onto events that are serialized
- let nodes_0_serialized = nodes[0].node.encode();
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], bs_funding_signed.channel_id).write(&mut chan_0_monitor_serialized).unwrap();
-
- fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
- logger = test_utils::TestLogger::new();
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let config = UserConfig::default();
- let (_, nodes_0_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: config,
- keys_manager,
- fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: &logger,
- channel_monitors,
- }).unwrap()
- };
- nodes_0_deserialized = nodes_0_deserialized_tmp;
- assert!(nodes_0_read.is_empty());
-
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- nodes[0].node = &nodes_0_deserialized;
-
- // After deserializing, make sure the funding_transaction is still held by the channel manager
- let events_4 = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events_4.len(), 0);
- assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
- assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
-
- // Make sure the channel is functioning as though the de/serialization never happened
- assert_eq!(nodes[0].node.list_channels().len(), 1);
- check_added_monitors!(nodes[0], 1);
-
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
-
- nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-
- let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
- let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
- for node in nodes.iter() {
- assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
- node.gossip_sync.handle_channel_update(&as_update).unwrap();
- node.gossip_sync.handle_channel_update(&bs_update).unwrap();
- }
-
- send_payment(&nodes[0], &[&nodes[1]], 1000000);
-}
-
-#[test]
-fn test_simple_manager_serialize_deserialize() {
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let logger: test_utils::TestLogger;
- let fee_estimator: test_utils::TestFeeEstimator;
- let persister: test_utils::TestPersister;
- let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
- let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
-
- let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
- let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
-
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- let nodes_0_serialized = nodes[0].node.encode();
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-
- logger = test_utils::TestLogger::new();
- fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let (_, nodes_0_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: UserConfig::default(),
- keys_manager,
- fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: &logger,
- channel_monitors,
- }).unwrap()
- };
- nodes_0_deserialized = nodes_0_deserialized_tmp;
- assert!(nodes_0_read.is_empty());
-
- assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- nodes[0].node = &nodes_0_deserialized;
- check_added_monitors!(nodes[0], 1);
-
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-
- fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
- claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
-}
-
-#[test]
-fn test_manager_serialize_deserialize_inconsistent_monitor() {
- // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
- let chanmon_cfgs = create_chanmon_cfgs(4);
- let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
- let logger: test_utils::TestLogger;
- let fee_estimator: test_utils::TestFeeEstimator;
- let persister: test_utils::TestPersister;
- let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
- let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
- let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
- let chan_id_2 = create_announced_chan_between_nodes(&nodes, 2, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
- let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-
- let mut node_0_stale_monitors_serialized = Vec::new();
- for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] {
- let mut writer = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id_iter).write(&mut writer).unwrap();
- node_0_stale_monitors_serialized.push(writer.0);
- }
-
- let (our_payment_preimage, _, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
-
- // Serialize the ChannelManager here, but the monitor we keep up-to-date
- let nodes_0_serialized = nodes[0].node.encode();
-
- route_payment(&nodes[0], &[&nodes[3]], 1000000);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
-	// Now serialize the ChannelMonitor (which is now out-of-sync with the ChannelManager for the
-	// channel w/ nodes[3])
- let mut node_0_monitors_serialized = Vec::new();
- for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] {
- let mut writer = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id_iter).write(&mut writer).unwrap();
- node_0_monitors_serialized.push(writer.0);
- }
-
- logger = test_utils::TestLogger::new();
- fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
-
-
- let mut node_0_stale_monitors = Vec::new();
- for serialized in node_0_stale_monitors_serialized.iter() {
- let mut read = &serialized[..];
- let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
- assert!(read.is_empty());
- node_0_stale_monitors.push(monitor);
- }
-
- let mut node_0_monitors = Vec::new();
- for serialized in node_0_monitors_serialized.iter() {
- let mut read = &serialized[..];
- let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
- assert!(read.is_empty());
- node_0_monitors.push(monitor);
- }
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- if let Err(msgs::DecodeError::InvalidValue) =
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: UserConfig::default(),
- keys_manager,
- fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: &logger,
- channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
- }) { } else {
- panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
- };
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let (_, nodes_0_deserialized_tmp) =
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: UserConfig::default(),
- keys_manager,
- fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: &logger,
- channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
- }).unwrap();
- nodes_0_deserialized = nodes_0_deserialized_tmp;
- assert!(nodes_0_read.is_empty());
-
- { // Channel close should result in a commitment tx
- let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(txn.len(), 1);
- check_spends!(txn[0], funding_tx);
- assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
- }
-
- for monitor in node_0_monitors.drain(..) {
- assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
- ChannelMonitorUpdateStatus::Completed);
- check_added_monitors!(nodes[0], 1);
- }
- nodes[0].node = &nodes_0_deserialized;
- check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
-
- // nodes[1] and nodes[2] have no lost state with nodes[0]...
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
- reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
- //... and we can even still claim the payment!
- claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
-
- nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
- nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
- let mut found_err = false;
- for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
- if let MessageSendEvent::HandleError { ref action, .. } = msg_event {
- match action {
- &ErrorAction::SendErrorMessage { ref msg } => {
- assert_eq!(msg.channel_id, channel_id);
- assert!(!found_err);
- found_err = true;
- },
- _ => panic!("Unexpected event!"),
- }
- }
- }
- assert!(found_err);
-}
-
macro_rules! check_spendable_outputs {
($node: expr, $keysinterface: expr) => {
{
} else { assert!(false); }
}
-fn do_test_data_loss_protect(reconnect_panicing: bool) {
- // When we get a data_loss_protect proving we're behind, we immediately panic as the
- // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
- // panic message informs the user they should force-close without broadcasting, which is tested
- // if `reconnect_panicing` is not set.
- let persister;
- let logger;
- let fee_estimator;
- let tx_broadcaster;
- let chain_source;
- let mut chanmon_cfgs = create_chanmon_cfgs(2);
- // We broadcast during Drop because chanmon is out of sync with chanmgr, which would cause a panic
- // during signing due to revoked tx
- chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- let monitor;
- let node_state_0;
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-
- // Cache node A state before any channel update
- let previous_node_state = nodes[0].node.encode();
- let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan.2).write(&mut previous_chain_monitor_state).unwrap();
-
- send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
- send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
-
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- // Restore node A from previous state
- logger = test_utils::TestLogger::with_id(format!("node {}", 0));
- let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
- chain_source = test_utils::TestChainSource::new(Network::Testnet);
- tx_broadcaster = test_utils::TestBroadcaster { txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new())) };
- fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
- persister = test_utils::TestPersister::new();
- monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
- node_state_0 = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
- keys_manager: keys_manager,
- fee_estimator: &fee_estimator,
- chain_monitor: &monitor,
- logger: &logger,
- tx_broadcaster: &tx_broadcaster,
- default_config: UserConfig::default(),
- channel_monitors,
- }).unwrap().1
- };
- nodes[0].node = &node_state_0;
- assert_eq!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor),
- ChannelMonitorUpdateStatus::Completed);
- nodes[0].chain_monitor = &monitor;
- nodes[0].chain_source = &chain_source;
-
- check_added_monitors!(nodes[0], 1);
-
- if reconnect_panicing {
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
-
- let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-
- // Check we close channel detecting A is fallen-behind
- // Check that we sent the warning message when we detected that A has fallen behind,
- // and give the possibility for A to recover from the warning.
- nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
- let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
- assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
-
- {
- let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
- // The node B should not broadcast the transaction to force close the channel!
- assert!(node_txn.is_empty());
- }
-
- let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
- // Check A panics upon seeing proof it has fallen behind.
- nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
- return; // By this point we should have panic'ed!
- }
-
- nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
- check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
- {
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(node_txn.len(), 0);
- }
-
- for msg in nodes[0].node.get_and_clear_pending_msg_events() {
- if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
- } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
- match action {
- &ErrorAction::SendErrorMessage { ref msg } => {
- assert_eq!(msg.data, "Channel force-closed");
- },
- _ => panic!("Unexpected event!"),
- }
- } else {
- panic!("Unexpected event {:?}", msg)
- }
- }
-
-	// after the warning message sent by B, we should not be able to
-	// use the channel, or successfully reconnect to the channel.
- assert!(nodes[0].node.list_usable_channels().is_empty());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
-
- nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
- let mut err_msgs_0 = Vec::with_capacity(1);
- for msg in nodes[0].node.get_and_clear_pending_msg_events() {
- if let MessageSendEvent::HandleError { ref action, .. } = msg {
- match action {
- &ErrorAction::SendErrorMessage { ref msg } => {
- assert_eq!(msg.data, "Failed to find corresponding channel");
- err_msgs_0.push(msg.clone());
- },
- _ => panic!("Unexpected event!"),
- }
- } else {
- panic!("Unexpected event!");
- }
- }
- assert_eq!(err_msgs_0.len(), 1);
- nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
- assert!(nodes[1].node.list_usable_channels().is_empty());
- check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
- check_closed_broadcast!(nodes[1], false);
-}
-
-#[test]
-#[should_panic]
-fn test_data_loss_protect_showing_stale_state_panics() {
- do_test_data_loss_protect(true);
-}
-
-#[test]
-fn test_force_close_without_broadcast() {
- do_test_data_loss_protect(false);
-}
-
#[test]
fn test_check_htlc_underpaying() {
// Send payment through A -> B but A is maliciously
do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
}
-#[test]
-fn test_forwardable_regen() {
- // Tests that if we reload a ChannelManager while forwards are pending we will regenerate the
- // PendingHTLCsForwardable event automatically, ensuring we don't forget to forward/receive
- // HTLCs.
- // We test it for both payment receipt and payment forwarding.
-
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let persister: test_utils::TestPersister;
- let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
- let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
-
- // First send a payment to nodes[1]
- let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
- check_added_monitors!(nodes[0], 1);
-
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- let payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
- commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
-
- // Next send a payment which is forwarded by nodes[1]
- let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000);
- nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
- check_added_monitors!(nodes[0], 1);
-
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- let payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
- commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-
- // There is already a PendingHTLCsForwardable event "pending" so another one will not be
- // generated
- assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
-
- // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-
- let nodes_1_serialized = nodes[1].node.encode();
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- let mut chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[1], chan_id_1).write(&mut chan_0_monitor_serialized).unwrap();
- get_monitor!(nodes[1], chan_id_2).write(&mut chan_1_monitor_serialized).unwrap();
-
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[1].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
- nodes[1].chain_monitor = &new_chain_monitor;
-
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
- let mut chan_1_monitor_read = &chan_1_monitor_serialized.0[..];
- let (_, mut chan_1_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_1_monitor_read, keys_manager).unwrap();
- assert!(chan_1_monitor_read.is_empty());
-
- let mut nodes_1_read = &nodes_1_serialized[..];
- let (_, nodes_1_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- channel_monitors.insert(chan_1_monitor.get_funding_txo().0, &mut chan_1_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_1_read, ChannelManagerReadArgs {
- default_config: UserConfig::default(),
- keys_manager,
- fee_estimator: node_cfgs[1].fee_estimator,
- chain_monitor: nodes[1].chain_monitor,
- tx_broadcaster: nodes[1].tx_broadcaster.clone(),
- logger: nodes[1].logger,
- channel_monitors,
- }).unwrap()
- };
- nodes_1_deserialized = nodes_1_deserialized_tmp;
- assert!(nodes_1_read.is_empty());
-
- assert_eq!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- assert_eq!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor),
- ChannelMonitorUpdateStatus::Completed);
- nodes[1].node = &nodes_1_deserialized;
- check_added_monitors!(nodes[1], 2);
-
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
- // Note that nodes[1] and nodes[2] resend their channel_ready here since they haven't updated
- // the commitment state.
- reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
- expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000);
- check_added_monitors!(nodes[1], 1);
-
- let mut events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- let payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
- commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false);
- expect_pending_htlcs_forwardable!(nodes[2]);
- expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
-
- claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
- claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
-}
-
fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
}
-fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
- // Test what happens if a node receives an MPP payment, claims it, but crashes before
- // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
- // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
- // have the PaymentReceived event, (b) have one (or two) channel(s) that goes on chain with the
- // HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does
- // not have the preimage tied to the still-pending HTLC.
- //
- // To get to the correct state, on startup we should propagate the preimage to the
- // still-off-chain channel, claiming the HTLC as soon as the peer connects, with the monitor
- // receiving the preimage without a state update.
- //
- // Further, we should generate a `PaymentClaimed` event to inform the user that the payment was
- // definitely claimed.
- let chanmon_cfgs = create_chanmon_cfgs(4);
- let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
-
- let persister: test_utils::TestPersister;
- let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_3_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
-
- let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
-
- create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- let chan_id_persisted = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
- let chan_id_not_persisted = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
-
- // Create an MPP route for 15k sats, more than the default htlc-max of 10%
- let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
- assert_eq!(route.paths.len(), 2);
- route.paths.sort_by(|path_a, _| {
- // Sort the path so that the path through nodes[1] comes first
- if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
- core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
- });
-
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
- check_added_monitors!(nodes[0], 2);
-
- // Send the payment through to nodes[3] *without* clearing the PaymentReceived event
- let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(send_events.len(), 2);
- do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
- do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
-
- // Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
- // monitors and ChannelManager, for use later, if we don't want to persist both monitors.
- let mut original_monitor = test_utils::TestVecWriter(Vec::new());
- if !persist_both_monitors {
- for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
- if outpoint.to_channel_id() == chan_id_not_persisted {
- assert!(original_monitor.0.is_empty());
- nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
- }
- }
- }
-
- let mut original_manager = test_utils::TestVecWriter(Vec::new());
- nodes[3].node.write(&mut original_manager).unwrap();
-
- expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
-
- nodes[3].node.claim_funds(payment_preimage);
- check_added_monitors!(nodes[3], 2);
- expect_payment_claimed!(nodes[3], payment_hash, 15_000_000);
-
- // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
- // crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
- // with the old ChannelManager.
- let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
- for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
- if outpoint.to_channel_id() == chan_id_persisted {
- assert!(updated_monitor.0.is_empty());
- nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
- }
- }
- // If `persist_both_monitors` is set, get the second monitor here as well
- if persist_both_monitors {
- for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
- if outpoint.to_channel_id() == chan_id_not_persisted {
- assert!(original_monitor.0.is_empty());
- nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
- }
- }
- }
-
- // Now restart nodes[3].
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[3].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[3].chain_source), nodes[3].tx_broadcaster.clone(), nodes[3].logger, node_cfgs[3].fee_estimator, &persister, keys_manager);
- nodes[3].chain_monitor = &new_chain_monitor;
- let mut monitors = Vec::new();
- for mut monitor_data in [original_monitor, updated_monitor].iter() {
- let (_, mut deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut &monitor_data.0[..], keys_manager).unwrap();
- monitors.push(deserialized_monitor);
- }
-
- let config = UserConfig::default();
- nodes_3_deserialized = {
- let mut channel_monitors = HashMap::new();
- for monitor in monitors.iter_mut() {
- channel_monitors.insert(monitor.get_funding_txo().0, monitor);
- }
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut &original_manager.0[..], ChannelManagerReadArgs {
- default_config: config,
- keys_manager,
- fee_estimator: node_cfgs[3].fee_estimator,
- chain_monitor: nodes[3].chain_monitor,
- tx_broadcaster: nodes[3].tx_broadcaster.clone(),
- logger: nodes[3].logger,
- channel_monitors,
- }).unwrap().1
- };
- nodes[3].node = &nodes_3_deserialized;
-
- for monitor in monitors {
- // On startup the preimage should have been copied into the non-persisted monitor:
- assert!(monitor.get_stored_preimages().contains_key(&payment_hash));
- assert_eq!(nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor),
- ChannelMonitorUpdateStatus::Completed);
- }
- check_added_monitors!(nodes[3], 2);
-
- nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
-
- // During deserialization, we should have closed one channel and broadcast its latest
- // commitment transaction. We should also still have the original PaymentReceived event we
- // never finished processing.
- let events = nodes[3].node.get_and_clear_pending_events();
- assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
- if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
- if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
- if persist_both_monitors {
- if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
- }
-
- // On restart, we should also get a duplicate PaymentClaimed event as we persisted the
- // ChannelManager prior to handling the original one.
- if let Event::PaymentClaimed { payment_hash: our_payment_hash, amount_msat: 15_000_000, .. } =
- events[if persist_both_monitors { 3 } else { 2 }]
- {
- assert_eq!(payment_hash, our_payment_hash);
- } else { panic!(); }
-
- assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 });
- if !persist_both_monitors {
- // If one of the two channels is still live, reveal the payment preimage over it.
-
- nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
- nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
-
- nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
- get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
- assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
-
- nodes[3].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &reestablish_2[0]);
-
- // Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
- // claim should fly.
- let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
- check_added_monitors!(nodes[3], 1);
- assert_eq!(ds_msgs.len(), 2);
- if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
-
- let cs_updates = match ds_msgs[0] {
- MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
- nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
- check_added_monitors!(nodes[2], 1);
- let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
- expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
- commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
- cs_updates
- }
- _ => panic!(),
- };
-
- nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
- commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
- expect_payment_sent!(nodes[0], payment_preimage);
- }
-}
-
-#[test]
-fn test_partial_claim_before_restart() {
- do_test_partial_claim_before_restart(false);
- do_test_partial_claim_before_restart(true);
-}
-
/// The possible events which may trigger a `max_dust_htlc_exposure` breach
#[derive(Clone, Copy, PartialEq)]
enum ExposureEvent {
assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
}
+
+#[test]
+fn accept_busted_but_better_fee() {
+ // If a peer sends us a fee update that is too low, but higher than our previous channel
+ // feerate, we should accept it. In the future we may want to consider closing the channel
+ // later, but for now we only accept the update.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
+
+ // Set nodes[1] to expect 5,000 sat/kW.
+ {
+ let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = 5000;
+ }
+
+	// If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = 1000;
+ }
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
+ commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+	// If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should accept
+	// it.
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = 2000;
+ }
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
+ commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the
+ // channel.
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = 1000;
+ }
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
+ err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() });
+ check_closed_broadcast!(nodes[1], true);
+ check_added_monitors!(nodes[1], 1);
+ },
+ _ => panic!("Unexpected event"),
+ };
+}
use bitcoin::hashes::cmp::fixed_time_eq;
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
-use crate::chain::keysinterface::{KeyMaterial, KeysInterface, Sign};
+use crate::chain::keysinterface::{KeyMaterial, KeysInterface};
use crate::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::msgs;
use crate::ln::msgs::MAX_VALUE_MSAT;
/// `current_time` is a Unix timestamp representing the current time.
///
/// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
-pub fn create<Signer: Sign, K: Deref>(keys: &ExpandedKey, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, keys_manager: &K, current_time: u64) -> Result<(PaymentHash, PaymentSecret), ()>
- where K::Target: KeysInterface<Signer = Signer>
+pub fn create<K: Deref>(keys: &ExpandedKey, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, keys_manager: &K, current_time: u64) -> Result<(PaymentHash, PaymentSecret), ()>
+ where K::Target: KeysInterface
{
let metadata_bytes = construct_metadata_bytes(min_value_msat, Method::LdkPaymentHash, invoice_expiry_delta_secs, current_time)?;
mod reorg_tests;
#[cfg(test)]
#[allow(unused_mut)]
+mod reload_tests;
+#[cfg(test)]
+#[allow(unused_mut)]
mod onion_route_tests;
#[cfg(test)]
#[allow(unused_mut)]
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
assert_eq!(Vec::<Balance>::new(),
nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
+
+ // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're
+ // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new
+ // monitor events or claimable balances.
+ for node in nodes.iter() {
+ connect_blocks(node, 6);
+ connect_blocks(node, 6);
+ assert!(node.chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
+ }
}
#[test]
connect_blocks(&nodes[0], node_a_htlc_claimable - nodes[0].best_block_info().1);
assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
test_spendable_output(&nodes[0], &as_txn[1]);
+
+ // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're
+ // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new
+ // monitor events or claimable balances.
+ connect_blocks(&nodes[0], 6);
+ connect_blocks(&nodes[0], 6);
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
}
#[test]
connect_blocks(&nodes[1], 1);
assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
+
+ // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're
+ // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new
+ // monitor events or claimable balances.
+ connect_blocks(&nodes[1], 6);
+ connect_blocks(&nodes[1], 6);
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
}
fn sorted_vec_with_additions<T: Ord + Clone>(v_orig: &Vec<T>, extra_ts: &[&T]) -> Vec<T> {
test_spendable_output(&nodes[1], &claim_txn[1]);
expect_payment_failed!(nodes[1], timeout_payment_hash, false);
assert_eq!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new());
+
+ // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're
+ // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new
+ // monitor events or claimable balances.
+ connect_blocks(&nodes[1], 6);
+ connect_blocks(&nodes[1], 6);
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
}
#[test]
test_spendable_output(&nodes[0], &as_second_htlc_claim_tx[1]);
assert_eq!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new());
+
+ // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're
+ // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new
+ // monitor events or claimable balances.
+ connect_blocks(&nodes[0], 6);
+ connect_blocks(&nodes[0], 6);
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
}
#[test]
expect_payment_failed!(nodes[1], revoked_payment_hash, false);
test_spendable_output(&nodes[1], &claim_txn_2[0]);
assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
+
+ // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're
+ // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new
+ // monitor events or claimable balances.
+ connect_blocks(&nodes[1], 6);
+ connect_blocks(&nodes[1], 6);
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
}
use crate::util::events::{MessageSendEventsProvider, OnionMessageProvider};
use crate::util::logger;
-use crate::util::ser::{BigSize, LengthReadable, Readable, ReadableArgs, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname};
+use crate::util::ser::{LengthReadable, Readable, ReadableArgs, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
}
pub(crate) enum OnionHopDataFormat {
- Legacy { // aka Realm-0
- short_channel_id: u64,
- },
NonFinalNode {
short_channel_id: u64,
},
/// Message serialization may panic if this value is more than 21 million Bitcoin.
pub(crate) amt_to_forward: u64,
pub(crate) outgoing_cltv_value: u32,
- // 12 bytes of 0-padding for Legacy format
}
pub struct DecodedOnionErrorPacket {
impl Writeable for OnionHopData {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
match self.format {
- OnionHopDataFormat::Legacy { short_channel_id } => {
- 0u8.write(w)?;
- short_channel_id.write(w)?;
- self.amt_to_forward.write(w)?;
- self.outgoing_cltv_value.write(w)?;
- w.write_all(&[0;12])?;
- },
OnionHopDataFormat::NonFinalNode { short_channel_id } => {
encode_varint_length_prefixed_tlv!(w, {
(2, HighZeroBytesDroppedBigSize(self.amt_to_forward), required),
impl Readable for OnionHopData {
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
- let b: BigSize = Readable::read(r)?;
- const LEGACY_ONION_HOP_FLAG: u64 = 0;
- let (format, amt, cltv_value) = if b.0 != LEGACY_ONION_HOP_FLAG {
- let mut rd = FixedLengthReader::new(r, b.0);
- let mut amt = HighZeroBytesDroppedBigSize(0u64);
- let mut cltv_value = HighZeroBytesDroppedBigSize(0u32);
- let mut short_id: Option<u64> = None;
- let mut payment_data: Option<FinalOnionHopData> = None;
- let mut keysend_preimage: Option<PaymentPreimage> = None;
- decode_tlv_stream!(&mut rd, {
- (2, amt, required),
- (4, cltv_value, required),
- (6, short_id, option),
- (8, payment_data, option),
- // See https://github.com/lightning/blips/blob/master/blip-0003.md
- (5482373484, keysend_preimage, option)
- });
- rd.eat_remaining().map_err(|_| DecodeError::ShortRead)?;
- let format = if let Some(short_channel_id) = short_id {
- if payment_data.is_some() { return Err(DecodeError::InvalidValue); }
- OnionHopDataFormat::NonFinalNode {
- short_channel_id,
- }
- } else {
- if let &Some(ref data) = &payment_data {
- if data.total_msat > MAX_VALUE_MSAT {
- return Err(DecodeError::InvalidValue);
- }
- }
- OnionHopDataFormat::FinalNode {
- payment_data,
- keysend_preimage,
- }
- };
- (format, amt.0, cltv_value.0)
+ let mut amt = HighZeroBytesDroppedBigSize(0u64);
+ let mut cltv_value = HighZeroBytesDroppedBigSize(0u32);
+ let mut short_id: Option<u64> = None;
+ let mut payment_data: Option<FinalOnionHopData> = None;
+ let mut keysend_preimage: Option<PaymentPreimage> = None;
+ read_tlv_fields!(r, {
+ (2, amt, required),
+ (4, cltv_value, required),
+ (6, short_id, option),
+ (8, payment_data, option),
+ // See https://github.com/lightning/blips/blob/master/blip-0003.md
+ (5482373484, keysend_preimage, option)
+ });
+
+ let format = if let Some(short_channel_id) = short_id {
+ if payment_data.is_some() { return Err(DecodeError::InvalidValue); }
+ OnionHopDataFormat::NonFinalNode {
+ short_channel_id,
+ }
} else {
- let format = OnionHopDataFormat::Legacy {
- short_channel_id: Readable::read(r)?,
- };
- let amt: u64 = Readable::read(r)?;
- let cltv_value: u32 = Readable::read(r)?;
- r.read_exact(&mut [0; 12])?;
- (format, amt, cltv_value)
+ if let &Some(ref data) = &payment_data {
+ if data.total_msat > MAX_VALUE_MSAT {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+ OnionHopDataFormat::FinalNode {
+ payment_data,
+ keysend_preimage,
+ }
};
- if amt > MAX_VALUE_MSAT {
+ if amt.0 > MAX_VALUE_MSAT {
return Err(DecodeError::InvalidValue);
}
Ok(OnionHopData {
format,
- amt_to_forward: amt,
- outgoing_cltv_value: cltv_value,
+ amt_to_forward: amt.0,
+ outgoing_cltv_value: cltv_value.0,
})
}
}
assert_eq!(encoded_value, target_value);
}
- #[test]
- fn encoding_legacy_onion_hop_data() {
- let msg = msgs::OnionHopData {
- format: OnionHopDataFormat::Legacy {
- short_channel_id: 0xdeadbeef1bad1dea,
- },
- amt_to_forward: 0x0badf00d01020304,
- outgoing_cltv_value: 0xffffffff,
- };
- let encoded_value = msg.encode();
- let target_value = hex::decode("00deadbeef1bad1dea0badf00d01020304ffffffff000000000000000000000000").unwrap();
- assert_eq!(encoded_value, target_value);
- }
-
#[test]
fn encoding_nonfinal_onion_hop_data() {
let mut msg = msgs::OnionHopData {
//! These tests work by standing up full nodes and route payments across the network, checking the
//! returned errors decode to the correct thing.
-use crate::chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::keysinterface::{KeysInterface, Recipient};
use crate::ln::{PaymentHash, PaymentSecret};
use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
-use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, HTLCForwardInfo, CLTV_FAR_FAR_AWAY, MIN_CLTV_EXPIRY_DELTA, PendingHTLCInfo, PendingHTLCRouting, PaymentId};
+use crate::ln::channelmanager::{self, HTLCForwardInfo, CLTV_FAR_FAR_AWAY, MIN_CLTV_EXPIRY_DELTA, PendingAddHTLCInfo, PendingHTLCInfo, PendingHTLCRouting, PaymentId};
use crate::ln::onion_utils;
-use crate::routing::gossip::{NetworkUpdate, RoutingFees, NodeId};
+use crate::routing::gossip::{NetworkUpdate, RoutingFees};
use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop};
-use crate::ln::features::{InitFeatures, InvoiceFeatures, NodeFeatures};
+use crate::ln::features::{InitFeatures, InvoiceFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, ChannelUpdate};
use crate::ln::wire::Encode;
use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
-use crate::util::ser::{ReadableArgs, Writeable, Writer};
+use crate::util::ser::{Writeable, Writer};
use crate::util::{byte_utils, test_utils};
use crate::util::config::{UserConfig, ChannelConfig};
use crate::util::errors::APIError;
// break the first (non-final) hop payload by swapping the realm (0) byte for a byte
// describing a length-1 TLV payload, which is obviously bogus.
new_payloads[0].data[0] = 1;
- msg.onion_routing_packet = onion_utils::construct_onion_packet_bogus_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
+ msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
}, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id));
// final node failure
// break the last-hop payload by swapping the realm (0) byte for a byte describing a
// length-1 TLV payload, which is obviously bogus.
new_payloads[1].data[0] = 1;
- msg.onion_routing_packet = onion_utils::construct_onion_packet_bogus_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
+ msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
}, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id));
// the following three with run_onion_failure_test_with_fail_intercept() test only the origin node
for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() {
for f in pending_forwards.iter_mut() {
match f {
- &mut HTLCForwardInfo::AddHTLC { ref mut forward_info, .. } =>
+ &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) =>
forward_info.outgoing_cltv_value += 1,
_ => {},
}
for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() {
for f in pending_forwards.iter_mut() {
match f {
- &mut HTLCForwardInfo::AddHTLC { ref mut forward_info, .. } =>
- forward_info.amt_to_forward -= 1,
+ &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) =>
+ forward_info.outgoing_amt_msat -= 1,
_ => {},
}
}
config.channel_handshake_limits.force_announced_channel_preference = false;
config.accept_forwards_to_priv_channels = !announced_channel;
let chanmon_cfgs = create_chanmon_cfgs(3);
+ let persister;
+ let chain_monitor;
+ let channel_manager_1_deserialized;
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(config), None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
// To test persistence of the updated config, we'll re-initialize the ChannelManager.
let config_after_restart = {
- let persister = test_utils::TestPersister::new();
- let chain_monitor = test_utils::TestChainMonitor::new(
- Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger,
- node_cfgs[1].fee_estimator, &persister, nodes[1].keys_manager,
- );
-
- let mut chanmon_1 = <(_, ChannelMonitor<_>)>::read(
- &mut &get_monitor!(nodes[1], other_channel.3).encode()[..], nodes[1].keys_manager,
- ).unwrap().1;
- let mut chanmon_2 = <(_, ChannelMonitor<_>)>::read(
- &mut &get_monitor!(nodes[1], channel_to_update.0).encode()[..], nodes[1].keys_manager,
- ).unwrap().1;
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chanmon_1.get_funding_txo().0, &mut chanmon_1);
- channel_monitors.insert(chanmon_2.get_funding_txo().0, &mut chanmon_2);
-
- let chanmgr = <(_, ChannelManager<_, _, _, _, _>)>::read(
- &mut &nodes[1].node.encode()[..], ChannelManagerReadArgs {
- default_config: *nodes[1].node.get_current_default_configuration(),
- keys_manager: nodes[1].keys_manager,
- fee_estimator: node_cfgs[1].fee_estimator,
- chain_monitor: &chain_monitor,
- tx_broadcaster: nodes[1].tx_broadcaster.clone(),
- logger: nodes[1].logger,
- channel_monitors: channel_monitors,
- },
- ).unwrap().1;
- chanmgr.list_channels().iter()
+ let chan_1_monitor_serialized = get_monitor!(nodes[1], other_channel.3).encode();
+ let chan_2_monitor_serialized = get_monitor!(nodes[1], channel_to_update.0).encode();
+ reload_node!(nodes[1], *nodes[1].node.get_current_default_configuration(), &nodes[1].node.encode(),
+ &[&chan_1_monitor_serialized, &chan_2_monitor_serialized], persister, chain_monitor, channel_manager_1_deserialized);
+ nodes[1].node.list_channels().iter()
.find(|channel| channel.channel_id == channel_to_update.0).unwrap()
.config.unwrap()
};
}
#[test]
-fn test_default_to_onion_payload_tlv_format() {
- // Tests that we default to creating tlv format onion payloads when no `NodeAnnouncementInfo`
- // `features` for a node in the `network_graph` exists, or when the node isn't in the
- // `network_graph`, and no other known `features` for the node exists.
- let mut priv_channels_conf = UserConfig::default();
- priv_channels_conf.channel_handshake_config.announced_channel = false;
- let chanmon_cfgs = create_chanmon_cfgs(5);
- let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, Some(priv_channels_conf)]);
- let mut nodes = create_network(5, &node_cfgs, &node_chanmgrs);
-
- create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- create_unannounced_chan_between_nodes_with_value(&nodes, 3, 4, 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-
- let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id());
- let origin_node = &nodes[0];
- let network_graph = origin_node.network_graph;
-
- // Clears all the `NodeAnnouncementInfo` for all nodes of `nodes[0]`'s `network_graph`, so that
- // their `features` aren't used when creating the `route`.
- network_graph.clear_nodes_announcement_info();
-
- let (announced_route, _, _, _) = get_route_and_payment_hash!(
- origin_node, nodes[3], payment_params, 10_000, TEST_FINAL_CLTV);
-
- let hops = &announced_route.paths[0];
- // Assert that the hop between `nodes[1]` and `nodes[2]` defaults to supporting variable length
- // onions, as `nodes[0]` has no `NodeAnnouncementInfo` `features` for `node[2]`
- assert!(hops[1].node_features.supports_variable_length_onion());
- // Assert that the hop between `nodes[2]` and `nodes[3]` defaults to supporting variable length
- // onions, as `nodes[0]` has no `NodeAnnouncementInfo` `features` for `node[3]`, and no `InvoiceFeatures`
- // for the `payment_params`, which would otherwise have been used.
- assert!(hops[2].node_features.supports_variable_length_onion());
- // Note that we do not assert that `hops[0]` (the channel between `nodes[0]` and `nodes[1]`)
- // supports variable length onions, as the `InitFeatures` exchanged in the init message
- // between the nodes will be used when creating the route. We therefore do not default to
- // supporting variable length onions for that hop, as the `InitFeatures` in this case are
- // `channelmanager::provided_init_features()`.
-
- let unannounced_chan = &nodes[4].node.list_usable_channels()[0];
-
- let last_hop = RouteHint(vec![RouteHintHop {
- src_node_id: nodes[3].node.get_our_node_id(),
- short_channel_id: unannounced_chan.short_channel_id.unwrap(),
- fees: RoutingFees {
- base_msat: 0,
- proportional_millionths: 0,
- },
- cltv_expiry_delta: 42,
- htlc_minimum_msat: None,
- htlc_maximum_msat: None,
- }]);
-
- let unannounced_chan_params = PaymentParameters::from_node_id(nodes[4].node.get_our_node_id()).with_route_hints(vec![last_hop]);
- let (unannounced_route, _, _, _) = get_route_and_payment_hash!(
- origin_node, nodes[4], unannounced_chan_params, 10_000, TEST_FINAL_CLTV);
-
- let unannounced_chan_hop = &unannounced_route.paths[0][3];
- // Ensure that `nodes[4]` doesn't exist in `nodes[0]`'s `network_graph`, as it's not public.
- assert!(&network_graph.read_only().nodes().get(&NodeId::from_pubkey(&nodes[4].node.get_our_node_id())).is_none());
- // Assert that the hop between `nodes[3]` and `nodes[4]` defaults to supporting variable length
- // onions, even though `nodes[4]` as `nodes[0]` doesn't exists in `nodes[0]`'s `network_graph`,
- // and no `InvoiceFeatures` for the `payment_params` exists, which would otherwise have been
- // used.
- assert!(unannounced_chan_hop.node_features.supports_variable_length_onion());
-
- let cur_height = nodes[0].best_block_info().1 + 1;
- let (announced_route_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&announced_route.paths[0], 40000, &None, cur_height, &None).unwrap();
- let (unannounced_route_paylods, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&unannounced_route.paths[0], 40000, &None, cur_height, &None).unwrap();
-
- for onion_payloads in vec![announced_route_payloads, unannounced_route_paylods] {
- for onion_payload in onion_payloads.iter() {
- match onion_payload.format {
- msgs::OnionHopDataFormat::Legacy {..} => {
- panic!("Generated a `msgs::OnionHopDataFormat::Legacy` payload, even though that shouldn't have happend.");
- }
- _ => {}
- }
- }
- }
-}
+fn test_always_create_tlv_format_onion_payloads() {
+ // Verify that we always generate tlv onion format payloads, even if the features specifically
+	// specify no support for variable length onions, as the legacy payload format has been
+ // deprecated in BOLT4.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
-#[test]
-fn test_do_not_default_to_onion_payload_tlv_format_when_unsupported() {
- // Tests that we do not default to creating tlv onions if either of these types features
- // exists, which specifies no support for variable length onions for a specific hop, when
- // creating a route:
- // 1. `InitFeatures` to the counterparty node exchanged with the init message to the node.
- // 2. `NodeFeatures` in the `NodeAnnouncementInfo` of a node in sender node's `network_graph`.
- // 3. `InvoiceFeatures` specified by the receiving node, when no `NodeAnnouncementInfo`
- // `features` exists for the receiver in the sender's `network_graph`.
- let chanmon_cfgs = create_chanmon_cfgs(4);
- let mut node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
-
- // Set `node[1]` config to `InitFeatures::empty()` + `static_remote_key` which implies
- // `!supports_variable_length_onion()` but still supports the required static-remote-key
- // feature.
+ // Set `node[1]`'s config features to features which return `false` for
+ // `supports_variable_length_onion()`
+ let mut no_variable_length_onion_features = InitFeatures::empty();
+ no_variable_length_onion_features.set_static_remote_key_required();
let mut node_1_cfg = &mut node_cfgs[1];
- node_1_cfg.features = InitFeatures::empty();
- node_1_cfg.features.set_static_remote_key_required();
+ node_1_cfg.features = no_variable_length_onion_features;
- let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
- let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::empty(), InitFeatures::empty());
+ create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::empty(), InitFeatures::empty());
- let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
+ let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id())
.with_features(InvoiceFeatures::empty());
- let origin_node = &nodes[0];
- let network_graph = origin_node.network_graph;
- network_graph.clear_nodes_announcement_info();
-
- // Set `NodeAnnouncementInfo` `features` which do not support variable length onions for
- // `nodes[2]` in `nodes[0]`'s `network_graph`.
- let nodes_2_unsigned_node_announcement = msgs::UnsignedNodeAnnouncement {
- features: NodeFeatures::empty(),
- timestamp: 0,
- node_id: nodes[2].node.get_our_node_id(),
- rgb: [32; 3],
- alias: [16;32],
- addresses: Vec::new(),
- excess_address_data: Vec::new(),
- excess_data: Vec::new(),
- };
- let _res = network_graph.update_node_from_unsigned_announcement(&nodes_2_unsigned_node_announcement);
-
- let (route, _, _, _) = get_route_and_payment_hash!(
- origin_node, nodes[3], payment_params, 10_000, TEST_FINAL_CLTV);
+ let (route, _payment_hash, _payment_preimage, _payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, 40000, TEST_FINAL_CLTV);
let hops = &route.paths[0];
-
- // Assert that the hop between `nodes[0]` and `nodes[1]` doesn't support variable length
- // onions, as as the `InitFeatures` exchanged (`InitFeatures::empty()`) in the init message
- // between the nodes when setting up the channel is used when creating the `route` and that we
- // therefore do not default to supporting variable length onions. Despite `nodes[0]` having no
- // `NodeAnnouncementInfo` `features` for `node[1]`.
+ // Asserts that the first hop to `node[1]` signals no support for variable length onions.
assert!(!hops[0].node_features.supports_variable_length_onion());
- // Assert that the hop between `nodes[1]` and `nodes[2]` uses the `features` from
- // `nodes_2_unsigned_node_announcement` that doesn't support variable length onions.
+	// Asserts that the second hop to `node[2]` signals no support for variable length onions.
assert!(!hops[1].node_features.supports_variable_length_onion());
- // Assert that the hop between `nodes[2]` and `nodes[3]` uses the `InvoiceFeatures` set to the
- // `payment_params`, that doesn't support variable length onions. We therefore do not end up
- // defaulting to supporting variable length onions, despite `nodes[0]` having no
- // `NodeAnnouncementInfo` `features` for `node[3]`.
- assert!(!hops[2].node_features.supports_variable_length_onion());
let cur_height = nodes[0].best_block_info().1 + 1;
let (onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 40000, &None, cur_height, &None).unwrap();
- for onion_payload in onion_payloads.iter() {
- match onion_payload.format {
- msgs::OnionHopDataFormat::Legacy {..} => {}
- _ => {
- panic!("Should have only have generated `msgs::OnionHopDataFormat::Legacy` payloads");
- }
- }
+ match onion_payloads[0].format {
+ msgs::OnionHopDataFormat::NonFinalNode {..} => {},
+ _ => { panic!(
+ "Should have generated a `msgs::OnionHopDataFormat::NonFinalNode` payload for `hops[0]`,
+ despite that the features signals no support for variable length onions"
+ )}
+ }
+ match onion_payloads[1].format {
+ msgs::OnionHopDataFormat::FinalNode {..} => {},
+ _ => {panic!(
+ "Should have generated a `msgs::OnionHopDataFormat::FinalNode` payload for `hops[1]`,
+ despite that the features signals no support for variable length onions"
+ )}
}
}
let mut forward_htlcs = nodes[1].node.forward_htlcs.lock().unwrap();
let mut pending_forward = forward_htlcs.get_mut(&phantom_scid).unwrap();
match pending_forward[0] {
- HTLCForwardInfo::AddHTLC {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
forward_info: PendingHTLCInfo {
routing: PendingHTLCRouting::Forward { ref mut onion_packet, .. },
..
}, ..
- } => {
+ }) => {
onion_packet.hmac[onion_packet.hmac.len() - 1] ^= 1;
Sha256::hash(&onion_packet.hop_data).into_inner().to_vec()
},
for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() {
for f in pending_forwards.iter_mut() {
match f {
- &mut HTLCForwardInfo::AddHTLC {
+ &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
forward_info: PendingHTLCInfo {
routing: PendingHTLCRouting::Forward { ref mut onion_packet, .. },
..
}, ..
- } => {
+ }) => {
// Construct the onion payloads for the entire route and an invalid amount.
let height = nodes[0].best_block_info().1;
let session_priv = SecretKey::from_slice(&session_priv).unwrap();
for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() {
for f in pending_forwards.iter_mut() {
match f {
- &mut HTLCForwardInfo::AddHTLC {
+ &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
forward_info: PendingHTLCInfo { ref mut outgoing_cltv_value, .. }, ..
- } => {
+ }) => {
*outgoing_cltv_value += 1;
},
_ => panic!("Unexpected forward"),
nodes[1].node.process_pending_htlc_forwards();
expect_pending_htlcs_forwardable_ignore!(nodes[1]);
nodes[1].node.process_pending_htlc_forwards();
- expect_payment_received!(nodes[1], payment_hash, payment_secret, recv_amt_msat);
+ expect_payment_received!(nodes[1], payment_hash, payment_secret, recv_amt_msat, None, route.paths[0].last().unwrap().pubkey);
nodes[1].node.fail_htlc_backwards(&payment_hash);
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();
Hmac::from_engine(hmac).into_inner()
}
+#[cfg(test)]
+#[inline]
+pub(super) fn gen_pad_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
+ assert_eq!(shared_secret.len(), 32);
+ let mut hmac = HmacEngine::<Sha256>::new(&[0x70, 0x61, 0x64]); // pad
+ hmac.input(&shared_secret);
+ Hmac::from_engine(hmac).into_inner()
+}
+
pub(crate) fn next_hop_packet_pubkey<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, packet_pubkey: PublicKey, packet_shared_secret: &[u8; 32]) -> Result<PublicKey, secp256k1::Error> {
let blinding_factor = {
let mut sha = Sha256::engine();
let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat };
let cltv = if cur_cltv == starting_htlc_offset { hop.cltv_expiry_delta + starting_htlc_offset } else { cur_cltv };
res.insert(0, msgs::OnionHopData {
- format: if hop.node_features.supports_variable_length_onion() {
- if idx == 0 {
- msgs::OnionHopDataFormat::FinalNode {
- payment_data: if let &Some(ref payment_secret) = payment_secret_option {
- Some(msgs::FinalOnionHopData {
- payment_secret: payment_secret.clone(),
- total_msat,
- })
- } else { None },
- keysend_preimage: *keysend_preimage,
- }
- } else {
- msgs::OnionHopDataFormat::NonFinalNode {
- short_channel_id: last_short_channel_id,
- }
+ format: if idx == 0 {
+ msgs::OnionHopDataFormat::FinalNode {
+ payment_data: if let &Some(ref payment_secret) = payment_secret_option {
+ Some(msgs::FinalOnionHopData {
+ payment_secret: payment_secret.clone(),
+ total_msat,
+ })
+ } else { None },
+ keysend_preimage: *keysend_preimage,
}
} else {
- msgs::OnionHopDataFormat::Legacy {
+ msgs::OnionHopDataFormat::NonFinalNode {
short_channel_id: last_short_channel_id,
}
},
}
#[cfg(test)]
-// Used in testing to write bogus OnionHopDatas, which is otherwise not representable in
-// msgs::OnionHopData.
-pub(super) fn construct_onion_packet_bogus_hopdata<HD: Writeable>(payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> msgs::OnionPacket {
+/// Used in testing to write bogus `BogusOnionHopData` as well as `RawOnionHopData`, which is
+/// otherwise not representable in `msgs::OnionHopData`.
+pub(super) fn construct_onion_packet_with_writable_hopdata<HD: Writeable>(payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> msgs::OnionPacket {
let mut packet_data = [0; ONION_DATA_LEN];
let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
use crate::ln::features::{ChannelFeatures, NodeFeatures};
use crate::routing::router::{Route, RouteHop};
use crate::ln::msgs;
- use crate::util::ser::{Writeable, Writer};
+ use crate::util::ser::{Writeable, Writer, VecWriter};
use hex;
use super::OnionKeys;
+ fn get_test_session_key() -> SecretKey {
+ SecretKey::from_slice(&hex::decode("4141414141414141414141414141414141414141414141414141414141414141").unwrap()[..]).unwrap()
+ }
+
fn build_test_onion_keys() -> Vec<OnionKeys> {
// Keys from BOLT 4, used in both test vector tests
let secp_ctx = Secp256k1::new();
RouteHop {
pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
- short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually
+ short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // We fill in the payloads manually instead of generating them from RouteHops.
},
RouteHop {
pubkey: PublicKey::from_slice(&hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
- short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually
+ short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // We fill in the payloads manually instead of generating them from RouteHops.
},
RouteHop {
pubkey: PublicKey::from_slice(&hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
- short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually
+ short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // We fill in the payloads manually instead of generating them from RouteHops.
},
RouteHop {
pubkey: PublicKey::from_slice(&hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]).unwrap(),
channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
- short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually
+ short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // We fill in the payloads manually instead of generating them from RouteHops.
},
RouteHop {
pubkey: PublicKey::from_slice(&hex::decode("02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145").unwrap()[..]).unwrap(),
channel_features: ChannelFeatures::empty(), node_features: NodeFeatures::empty(),
- short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually
+ short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // We fill in the payloads manually instead of generating them from RouteHops.
},
]],
payment_params: None,
};
- let session_priv = SecretKey::from_slice(&hex::decode("4141414141414141414141414141414141414141414141414141414141414141").unwrap()[..]).unwrap();
-
- let onion_keys = super::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
+ let onion_keys = super::construct_onion_keys(&secp_ctx, &route.paths[0], &get_test_session_key()).unwrap();
assert_eq!(onion_keys.len(), route.paths[0].len());
onion_keys
}
#[test]
fn onion_vectors() {
- // Legacy packet creation test vectors from BOLT 4
let onion_keys = build_test_onion_keys();
+ // Test generation of ephemeral keys and secrets. These values used to be part of the BOLT4
+ // test vectors, but have since been removed. We keep them as they provide test coverage.
assert_eq!(onion_keys[0].shared_secret.secret_bytes(), hex::decode("53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66").unwrap()[..]);
assert_eq!(onion_keys[0].blinding_factor[..], hex::decode("2ec2e5da605776054187180343287683aa6a51b4b1c04d6dd49c45d8cffb3c36").unwrap()[..]);
assert_eq!(onion_keys[0].ephemeral_pubkey.serialize()[..], hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]);
assert_eq!(onion_keys[4].rho, hex::decode("034e18b8cc718e8af6339106e706c52d8df89e2b1f7e9142d996acf88df8799b").unwrap()[..]);
assert_eq!(onion_keys[4].mu, hex::decode("8e45e5c61c2b24cb6382444db6698727afb063adecd72aada233d4bf273d975a").unwrap()[..]);
- // Test vectors below are flat-out wrong: they claim to set outgoing_cltv_value to non-0 :/
+ // Packet creation test vectors from BOLT 4 (see
+ // https://github.com/lightning/bolts/blob/16973e2b857e853308cafd59e42fa830d75b1642/bolt04/onion-test.json).
+ // Note that we represent the test vector payloads 2 and 5 through RawOnionHopData::data
+ // with raw hex instead of our in-memory enums, as the payloads contains custom types, and
+ // we have no way of representing that with our enums.
let payloads = vec!(
- msgs::OnionHopData {
- format: msgs::OnionHopDataFormat::Legacy {
- short_channel_id: 0,
- },
- amt_to_forward: 0,
- outgoing_cltv_value: 0,
- },
- msgs::OnionHopData {
- format: msgs::OnionHopDataFormat::Legacy {
- short_channel_id: 0x0101010101010101,
- },
- amt_to_forward: 0x0100000001,
- outgoing_cltv_value: 0,
- },
- msgs::OnionHopData {
- format: msgs::OnionHopDataFormat::Legacy {
- short_channel_id: 0x0202020202020202,
+ RawOnionHopData::new(msgs::OnionHopData {
+ format: msgs::OnionHopDataFormat::NonFinalNode {
+ short_channel_id: 1,
},
- amt_to_forward: 0x0200000002,
- outgoing_cltv_value: 0,
+ amt_to_forward: 15000,
+ outgoing_cltv_value: 1500,
+ }),
+ /*
+ The second payload is represented by raw hex as it contains custom type data. Content:
+ 1. length "52" (payload_length 82).
+
+ The first part of the payload has the `NonFinalNode` format, with content as follows:
+ 2. amt_to_forward "020236b0"
+ 02 (type amt_to_forward) 02 (length 2) 36b0 (value 14000)
+ 3. outgoing_cltv_value "04020578"
+ 04 (type outgoing_cltv_value) 02 (length 2) 0578 (value 1400)
+ 4. short_channel_id "06080000000000000002"
+ 06 (type short_channel_id) 08 (length 8) 0000000000000002 (value 2)
+
+ The rest of the payload is custom type data:
+ 5. custom_record "fd02013c0102030405060708090a0b0c0d0e0f0102030405060708090a0b0c0d0e0f0102030405060708090a0b0c0d0e0f0102030405060708090a0b0c0d0e0f"
+ */
+ RawOnionHopData {
+ data: hex::decode("52020236b00402057806080000000000000002fd02013c0102030405060708090a0b0c0d0e0f0102030405060708090a0b0c0d0e0f0102030405060708090a0b0c0d0e0f0102030405060708090a0b0c0d0e0f").unwrap(),
},
- msgs::OnionHopData {
- format: msgs::OnionHopDataFormat::Legacy {
- short_channel_id: 0x0303030303030303,
+ RawOnionHopData::new(msgs::OnionHopData {
+ format: msgs::OnionHopDataFormat::NonFinalNode {
+ short_channel_id: 3,
},
- amt_to_forward: 0x0300000003,
- outgoing_cltv_value: 0,
- },
- msgs::OnionHopData {
- format: msgs::OnionHopDataFormat::Legacy {
- short_channel_id: 0x0404040404040404,
+ amt_to_forward: 12500,
+ outgoing_cltv_value: 1250,
+ }),
+ RawOnionHopData::new(msgs::OnionHopData {
+ format: msgs::OnionHopDataFormat::NonFinalNode {
+ short_channel_id: 4,
},
- amt_to_forward: 0x0400000004,
- outgoing_cltv_value: 0,
+ amt_to_forward: 10000,
+ outgoing_cltv_value: 1000,
+ }),
+ /*
+ The fifth payload is represented by raw hex as it contains custom type data. Content:
+ 1. length "fd0110" (payload_length 272).
+
+ The first part of the payload has the `FinalNode` format, with content as follows:
+ 1. amt_to_forward "02022710"
+ 02 (type amt_to_forward) 02 (length 2) 2710 (value 10000)
+ 2. outgoing_cltv_value "040203e8"
+ 04 (type outgoing_cltv_value) 02 (length 2) 03e8 (value 1000)
+ 3. payment_data "082224a33562c54507a9334e79f0dc4f17d407e6d7c61f0e2f3d0d38599502f617042710"
+		08 (type payment_data) 22 (length 34) 24a33562c54507a9334e79f0dc4f17d407e6d7c61f0e2f3d0d38599502f61704 (payment_secret) 2710 (total_msat value 10000)
+
+ The rest of the payload is custom type data:
+ 4. custom_record "fd012de02a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"
+ */
+ RawOnionHopData {
+ data: hex::decode("fd011002022710040203e8082224a33562c54507a9334e79f0dc4f17d407e6d7c61f0e2f3d0d38599502f617042710fd012de02a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a").unwrap(),
},
);
- let packet: msgs::OnionPacket = super::construct_onion_packet_with_init_noise::<_, _>(payloads, onion_keys, super::FixedSizeOnionPacket([0; super::ONION_DATA_LEN]), Some(&PaymentHash([0x42; 32])));
- // Just check the final packet encoding, as it includes all the per-hop vectors in it
- // anyway...
- assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881
f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap());
+ // Verify that the serialized OnionHopDataFormat::NonFinalNode TLV payloads match the test vectors
+ let mut w = VecWriter(Vec::new());
+ payloads[0].write(&mut w).unwrap();
+ let hop_1_serialized_payload = w.0;
+ let expected_serialized_hop_1_payload = &hex::decode("1202023a98040205dc06080000000000000001").unwrap()[..];
+ assert_eq!(hop_1_serialized_payload, expected_serialized_hop_1_payload);
+
+ w = VecWriter(Vec::new());
+ payloads[2].write(&mut w).unwrap();
+ let hop_3_serialized_payload = w.0;
+ let expected_serialized_hop_3_payload = &hex::decode("12020230d4040204e206080000000000000003").unwrap()[..];
+ assert_eq!(hop_3_serialized_payload, expected_serialized_hop_3_payload);
+
+ w = VecWriter(Vec::new());
+ payloads[3].write(&mut w).unwrap();
+ let hop_4_serialized_payload = w.0;
+ let expected_serialized_hop_4_payload = &hex::decode("1202022710040203e806080000000000000004").unwrap()[..];
+ assert_eq!(hop_4_serialized_payload, expected_serialized_hop_4_payload);
+
+ let pad_keytype_seed = super::gen_pad_from_shared_secret(&get_test_session_key().secret_bytes());
+
+ let packet: msgs::OnionPacket = super::construct_onion_packet_with_writable_hopdata::<_>(payloads, onion_keys, pad_keytype_seed, &PaymentHash([0x42; 32]));
+
+ assert_eq!(packet.encode(), hex::decode("0002EEC7245D6B7D2CCB30380BFBE2A3648CD7A942653F5AA340EDCEA1F283686619F7F3416A5AA36DC7EEB3EC6D421E9615471AB870A33AC07FA5D5A51DF0A8823AABE3FEA3F90D387529D4F72837F9E687230371CCD8D263072206DBED0234F6505E21E282ABD8C0E4F5B9FF8042800BBAB065036EADD0149B37F27DDE664725A49866E052E809D2B0198AB9610FAA656BBF4EC516763A59F8F42C171B179166BA38958D4F51B39B3E98706E2D14A2DAFD6A5DF808093ABFCA5AEAACA16EDED5DB7D21FB0294DD1A163EDF0FB445D5C8D7D688D6DD9C541762BF5A5123BF9939D957FE648416E88F1B0928BFA034982B22548E1A4D922690EECF546275AFB233ACF4323974680779F1A964CFE687456035CC0FBA8A5428430B390F0057B6D1FE9A8875BFA89693EEB838CE59F09D207A503EE6F6299C92D6361BC335FCBF9B5CD44747AADCE2CE6069CFDC3D671DAEF9F8AE590CF93D957C9E873E9A1BC62D9640DC8FC39C14902D49A1C80239B6C5B7FD91D05878CBF5FFC7DB2569F47C43D6C0D27C438ABFF276E87364DEB8858A37E5A62C446AF95D8B786EAF0B5FCF78D98B41496794F8DCAAC4EEF34B2ACFB94C7E8C32A9E9866A8FA0B6F2A06F00A1CCDE569F97EEC05C803BA7500ACC96691D8898D73D8E6A47B8F43C3D5DE74458D20EDA61474C426359677001FBD75A74D7D5DB6CB4FEB83122F133206203E4E2D293F838BF8C8B3A29ACB321315100B87E80E0EDB272EE80FDA944E3FB6084ED4D7F7C7D21C69D9DA43D31A90B70693F9B0CC3EAC74C11AB8FF655905688916CFA4EF0BD04135F2E50B7C689A21D04E8E981E74C6058188B9B1F9DFC3EEC6838E9FFBCF22CE738D8A177C19318DFFEF090CEE67E12DE1A3E2A39F61247547BA5257489CBC11D7D91ED34617FCC42F7A9DA2E3CF31A94A210A1018143173913C38F60E62B24BF0D7518F38B5BAB3E6A1F8AEB35E31D6442C8ABB5178EFC892D2E787D79C6AD9E2FC271792983FA9955AC4D1D84A36C024071BC6E431B625519D556AF38185601F70E29035EA6A09C8B676C9D88CF7E05E0F17098B584C4168735940263F940033A220F40BE4C85344128B14BEB9E75696DB37014107801A59B13E89CD9D2258C169D523BE6D31552C44C82FF4BB18EC9F099F3BF0E5B1BB2BA9A87D7E26F98D294927B600B5529C47E04D98956677CBCEE8FA2B60F49776D8B8C367465B7C626DA53700684FB6C918EAD0EAB8360E4F60EDD25B4F43816A75ECF70F909301825B512469F8389D79402311D8AECB7B3EF8599E79485A4388D87744D899F7C47EE644361E17040A7958C8911BE6F463AB6A9B2AFACD688EC55EF517B38F1339EFC54487232798BB25522FF4572F
F68567FE830F92F7B8113EFCE3E98C3FFFBAEDCE4FD8B50E41DA97C0C08E423A72689CC68E68F752A5E3A9003E64E35C957CA2E1C48BB6F64B05F56B70B575AD2F278D57850A7AD568C24A4D32A3D74B29F03DC125488BC7C637DA582357F40B0A52D16B3B40BB2C2315D03360BC24209E20972C200566BCF3BBE5C5B0AEDD83132A8A4D5B4242BA370B6D67D9B67EB01052D132C7866B9CB502E44796D9D356E4E3CB47CC527322CD24976FE7C9257A2864151A38E568EF7A79F10D6EF27CC04CE382347A2488B1F404FDBF407FE1CA1C9D0D5649E34800E25E18951C98CAE9F43555EEF65FEE1EA8F15828807366C3B612CD5753BF9FB8FCED08855F742CDDD6F765F74254F03186683D646E6F09AC2805586C7CF11998357CAFC5DF3F285329366F475130C928B2DCEBA4AA383758E7A9D20705C4BB9DB619E2992F608A1BA65DB254BB389468741D0502E2588AEB54390AC600C19AF5C8E61383FC1BEBE0029E4474051E4EF908828DB9CCA13277EF65DB3FD47CCC2179126AAEFB627719F421E20").unwrap());
}
#[test]
writer.write_all(&self.data[..])
}
}
-
- #[test]
- fn variable_length_onion_vectors() {
- // Packet creation test vectors from BOLT 4 (as of this writing at
- // bolt04/onion-test-multi-frame.json in the spec repo).
- // Note that we use he RawOnionHopData for everything except Legacy hops, as even the hops
- // with "type": "tlv" are not valid TLV (they were for a previous version of TLV that
- // didn't move forward), and, thus, cannot be directly represented in our in-memory enums.
- let onion_keys = build_test_onion_keys();
-
- let payloads = vec!(
- RawOnionHopData::new(msgs::OnionHopData {
- format: msgs::OnionHopDataFormat::Legacy {
- short_channel_id: 0,
- },
- amt_to_forward: 0,
- outgoing_cltv_value: 0,
- }),
- RawOnionHopData {
- data: hex::decode("140101010101010101000000000000000100000001").unwrap(),
- },
- RawOnionHopData {
- data: hex::decode("fd0100000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff").unwrap(),
- },
- RawOnionHopData {
- data: hex::decode("140303030303030303000000000000000300000003").unwrap(),
- },
- RawOnionHopData::new(msgs::OnionHopData {
- format: msgs::OnionHopDataFormat::Legacy {
- short_channel_id: 0x0404040404040404,
- },
- amt_to_forward: 4,
- outgoing_cltv_value: 4,
- }),
- );
-
- let packet: msgs::OnionPacket = super::construct_onion_packet_with_init_noise::<_, _>(payloads, onion_keys, super::FixedSizeOnionPacket([0; super::ONION_DATA_LEN]), Some(&PaymentHash([0x42; 32])));
- // Just check the final packet encoding, as it includes all the per-hop vectors in it
- // anyway...
- assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a71a060daf367132b378b3a3883c0e2c0e026b8900b2b5cdbc784e1a3bb913f88a9c50f7d61ab590531cf08000178a333a347f8b4072ed056f820f77774345e183a342ec4729f3d84accf515e88adddb85ecc08daba68404bae9a8e8d7178977d7094a1ae549f89338c0777551f874159eb42d3a59fb9285ad4e24883f27de23942ec966611e99bee1cee503455be9e8e642cef6cef7b9864130f692283f8a973d47a8f1c1726b6e59969385975c766e35737c8d76388b64f748ee7943ffb0e2ee45c57a1abc40762ae598723d21bd184e2b338f68ebff47219357bd19cd7e01e2337b806ef4d717888e129e59cd3dc31e6201ccb2fd6d7499836f37a993262468bcb3a4dcd03a22818aca49c6b7b9b8e9e870045631d8e039b066ff86e0d1b7291f71cefa7264c70404a8e538b566c17ccc5feab231401e6c08a01bd5edfc1aa8e3e533b96e82d1f91118d508924b923531929aea889fcdf050597c681185f336b1da63b0939aa2b7c50b21b5eb7b6ad66c81fab98a3cdf73f658149e7e9ced4edde5d38c9b8f92e16f6b4ab13d7fca6a0e4ecc9f9de611a90da6e99c39551094c56e3196f282c5dffd9fc4b2fc12f3bca8e6fe47eb45fbdd3be21a8a8d200797eae3c9a0497132f92410d804977408494dff49dd3d8bce248e0b74fd9e6f0f7102c25ddfa02bd9ad9f746abbfa337ef811d5345a9e16b60de1767b209645ba40bd1f9a5f75bc04feca9b27c5554be4fe83fac2cb83aa447a817bb85ae966c68b420063833fada375e2f515965e687a45699632902672c654d1d18d7bcbf55e8fa57f63f2da449f8e1e606e8722df081e5f193fc4179feb99ad22819afdeef211f7c54afdba92aeef0c00b7bc2b65a4813c01f907a8377585708f2d4c940a25328e585714c8ded0a9a4d7a6de1027c1cb7a0198cd3db68b58c0704dfd0cfbe624e9cd18cc0ae5d96697bb476708b9ee0403d211e64e0d5a7683a7a9a140c02f0ff1c6e67a302941b4052bdea8a63e70a3ad62c5b89c698f1fd3c7685cb49705096cad702d02d93bcb1c27a409f4c9bddec001205ca4a2740f19b50900be81c7e847f1a863deea8d35701f1355cad8db57b1d4eb2ab4e29587734785abfb46ddede71928213d7d089dfdeda052827f459f1688cc0935bd47e7bcec27427c8376dcce7e22699567c0d145f8a7db33f6758815f1f15f9f7a9760dec4f34ae095edda4c64e9735bdd029c4e32c2ee31ba47ec5e6bdb97813d52dbd15b4e0b7a2c7f790ae64104d99f38c127f0a093288fa341
44adb16b8968d4fa7656fcec99de8503dd46d3b03620a71c7cd085364abd30dccf7fbda25a1cdc102600149c9af1c97aa0372cd2e1909f28ac5c686f432b310e79528c9b8b9e8f314c1e74621ce6308ad2278b81d460892e0d9dd38b7c76d58be6dfd10ae7583ee1e7ef5b3f6f78dc60af0950df1b00cc55b6d178ba2e476bea0eaeef49323b83f05804159e7aef4eed4cc60dd07be76f067dfd0bcfb0b806b69ba921336a20c43c832d0cab8fa3ddeb29e3bf07b0d98a112eb07802756235a49d44a8b82a950d84e95e01971f0e106ccb337f07384e21620e0ad39e16ed9edca123226cf55ac44f449eeb53e38a7f27d101806e4823e4efcc887414240ee6826c4a5cb1c6443ad36ebf905a435c1d9054e54173911b17b5b40f60b3d9fd5f12eac54ca1e20191f5f18544d5fd3d665e9bcef96fb44b76110aa64d9db4c86c9513cbdad546538e8aec521fbe83ceac5e74a15629f1ed0b870a1d0d1e5680b6d6100d1bd3f3b9043bd35b8919c4088f1949b8be89e4701eb870f8ed64fafa446c78df3ea").unwrap());
- }
}
//! payments thereafter.
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
-use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::OutPoint;
use crate::chain::keysinterface::KeysInterface;
use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
-use crate::ln::channelmanager::{self, BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS};
+use crate::ln::channelmanager::{self, BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS};
use crate::ln::msgs;
use crate::ln::msgs::ChannelMessageHandler;
use crate::routing::router::{PaymentParameters, get_route};
use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::util::test_utils;
use crate::util::errors::APIError;
-use crate::util::enforcing_trait_impls::EnforcingSigner;
-use crate::util::ser::{ReadableArgs, Writeable};
-use crate::io;
+use crate::util::ser::Writeable;
-use bitcoin::{Block, BlockHeader, BlockHash, TxMerkleNode};
+use bitcoin::{Block, BlockHeader, TxMerkleNode};
use bitcoin::hashes::Hash;
use bitcoin::network::constants::Network;
use crate::prelude::*;
use crate::ln::functional_test_utils::*;
+use crate::routing::gossip::NodeId;
#[test]
fn retry_single_path_payment() {
// The ChannelMonitor should always be the latest version, as we're required to persist it
// during the `commitment_signed_dance!()`.
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let (_, nodes_0_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: test_default_channel_config(),
- keys_manager,
- fee_estimator: node_cfgs[0].fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: nodes[0].logger,
- channel_monitors,
- }).unwrap()
- };
- nodes_0_deserialized = nodes_0_deserialized_tmp;
- assert!(nodes_0_read.is_empty());
-
- assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- nodes[0].node = &nodes_0_deserialized;
- check_added_monitors!(nodes[0], 1);
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
// force-close the channel.
// The ChannelMonitor should always be the latest version, as we're required to persist it
// during the `commitment_signed_dance!()`.
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-
- let mut chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-
- macro_rules! reload_node {
- ($chain_monitor: ident, $chan_manager: ident, $persister: ident) => { {
- $persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- $chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &$persister, keys_manager);
- nodes[0].chain_monitor = &$chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let mut chan_1_monitor = None;
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
-
- if !chan_1_monitor_serialized.0.is_empty() {
- let mut chan_1_monitor_read = &chan_1_monitor_serialized.0[..];
- chan_1_monitor = Some(<(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_1_monitor_read, keys_manager).unwrap().1);
- assert!(chan_1_monitor_read.is_empty());
- channel_monitors.insert(chan_1_monitor.as_ref().unwrap().get_funding_txo().0, chan_1_monitor.as_mut().unwrap());
- }
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- let (_, nodes_0_deserialized_tmp) = {
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
- default_config: test_default_channel_config(),
- keys_manager,
- fee_estimator: node_cfgs[0].fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: nodes[0].logger,
- channel_monitors,
- }).unwrap()
- };
- $chan_manager = nodes_0_deserialized_tmp;
- assert!(nodes_0_read.is_empty());
-
- assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- if !chan_1_monitor_serialized.0.is_empty() {
- let funding_txo = chan_1_monitor.as_ref().unwrap().get_funding_txo().0;
- assert_eq!(nodes[0].chain_monitor.watch_channel(funding_txo, chan_1_monitor.unwrap()),
- ChannelMonitorUpdateStatus::Completed);
- }
- nodes[0].node = &$chan_manager;
- check_added_monitors!(nodes[0], if !chan_1_monitor_serialized.0.is_empty() { 2 } else { 1 });
-
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- } }
- }
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
- reload_node!(first_new_chain_monitor, first_nodes_0_deserialized, first_persister);
+ reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
// force-close the channel.
// We set mpp_parts_remain to avoid having abandon_payment called
expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
- chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
- chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id_3).write(&mut chan_1_monitor_serialized).unwrap();
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
nodes_0_serialized = nodes[0].node.encode();
assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- reload_node!(second_new_chain_monitor, second_nodes_0_deserialized, second_persister);
+ reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
- chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id_3).write(&mut chan_1_monitor_serialized).unwrap();
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
nodes_0_serialized = nodes[0].node.encode();
// Ensure that after reload we cannot retry the payment.
- reload_node!(third_new_chain_monitor, third_nodes_0_deserialized, third_persister);
+ reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
.get_mut(&funding_txo).unwrap().drain().collect();
- // If we are using chain::Confirm instead of chain::Listen, we will get the same update twice
- assert!(mon_updates.len() == 1 || mon_updates.len() == 2);
+ // If we are using chain::Confirm instead of chain::Listen, we will get the same update twice.
+ // If we're testing connection idempotency we may get substantially more.
+ assert!(mon_updates.len() >= 1);
assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
// If we persist the ChannelManager here, we should get the PaymentSent event after
// deserialization.
- let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
+ let mut chan_manager_serialized = Vec::new();
if !persist_manager_post_event {
- nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+ chan_manager_serialized = nodes[0].node.encode();
}
// Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
// payment sent event.
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
for update in mon_updates {
nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
}
// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
// twice.
if persist_manager_post_event {
- nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+ chan_manager_serialized = nodes[0].node.encode();
}
// Now reload nodes[0]...
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let (_, nodes_0_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
- ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
- default_config: Default::default(),
- keys_manager,
- fee_estimator: node_cfgs[0].fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: nodes[0].logger,
- channel_monitors,
- }).unwrap()
- };
- nodes_0_deserialized = nodes_0_deserialized_tmp;
-
- assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- check_added_monitors!(nodes[0], 1);
- nodes[0].node = &nodes_0_deserialized;
+ reload_node!(nodes[0], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
if persist_manager_post_event {
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
// The simplest way to get a failure after a fulfill is to reload nodes[1] from a state
// pre-fulfill, which we do by serializing it here.
- let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
- nodes[1].node.write(&mut chan_manager_serialized).unwrap();
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[1], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+ let chan_manager_serialized = nodes[1].node.encode();
+ let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id).encode();
nodes[1].node.claim_funds(payment_preimage);
check_added_monitors!(nodes[1], 1);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
// Now reload nodes[1]...
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[1].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
- nodes[1].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let (_, nodes_1_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
- ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
- default_config: Default::default(),
- keys_manager,
- fee_estimator: node_cfgs[1].fee_estimator,
- chain_monitor: nodes[1].chain_monitor,
- tx_broadcaster: nodes[1].tx_broadcaster.clone(),
- logger: nodes[1].logger,
- channel_monitors,
- }).unwrap()
- };
- nodes_1_deserialized = nodes_1_deserialized_tmp;
-
- assert_eq!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- check_added_monitors!(nodes[1], 1);
- nodes[1].node = &nodes_1_deserialized;
+ reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
// payment_id, it should be rejected.
let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
match send_result {
- Err(PaymentSendFailure::ParameterError(APIError::RouteError { err: "Payment already in progress" })) => {},
+ Err(PaymentSendFailure::DuplicatePayment) => {},
_ => panic!("Unexpected send result: {:?}", send_result),
}
// also be rejected.
let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
match send_result {
- Err(PaymentSendFailure::ParameterError(APIError::RouteError { err: "Payment already in progress" })) => {},
+ Err(PaymentSendFailure::DuplicatePayment) => {},
_ => panic!("Unexpected send result: {:?}", send_result),
}
}
// payment_id, it should be rejected.
let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
match send_result {
- Err(PaymentSendFailure::ParameterError(APIError::RouteError { err: "Payment already in progress" })) => {},
+ Err(PaymentSendFailure::DuplicatePayment) => {},
_ => panic!("Unexpected send result: {:?}", send_result),
}
// also be rejected.
let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
match send_result {
- Err(PaymentSendFailure::ParameterError(APIError::RouteError { err: "Payment already in progress" })) => {},
+ Err(PaymentSendFailure::DuplicatePayment) => {},
_ => panic!("Unexpected send result: {:?}", send_result),
}
}
pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
}
+
+#[test]
+fn test_trivial_inflight_htlc_tracking(){
+ // In this test, we test three scenarios:
+ // (1) Sending + claiming a payment successfully should return `None` when querying InFlightHtlcs
+ // (2) Sending a payment without claiming it should return the payment's value (500000) when querying InFlightHtlcs
+ // (3) After we claim the payment sent in (2), InFlightHtlcs should return `None` for the query.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, chan_1_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+ let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+
+ // Send and claim the payment. Inflight HTLCs should be empty.
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
+ {
+ let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
+
+ let node_0_channel_lock = nodes[0].node.channel_state.lock().unwrap();
+ let node_1_channel_lock = nodes[1].node.channel_state.lock().unwrap();
+ let channel_1 = node_0_channel_lock.by_id.get(&chan_1_id).unwrap();
+ let channel_2 = node_1_channel_lock.by_id.get(&chan_2_id).unwrap();
+
+ let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
+ &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
+ &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
+ channel_1.get_short_channel_id().unwrap()
+ );
+ let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
+ &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
+ &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
+ channel_2.get_short_channel_id().unwrap()
+ );
+
+ assert_eq!(chan_1_used_liquidity, None);
+ assert_eq!(chan_2_used_liquidity, None);
+ }
+
+ // Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
+ let (payment_preimage, _, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
+ {
+ let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
+
+ let node_0_channel_lock = nodes[0].node.channel_state.lock().unwrap();
+ let node_1_channel_lock = nodes[1].node.channel_state.lock().unwrap();
+ let channel_1 = node_0_channel_lock.by_id.get(&chan_1_id).unwrap();
+ let channel_2 = node_1_channel_lock.by_id.get(&chan_2_id).unwrap();
+
+ let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
+ &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
+ &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
+ channel_1.get_short_channel_id().unwrap()
+ );
+ let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
+ &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
+ &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
+ channel_2.get_short_channel_id().unwrap()
+ );
+
+ // First hop accounts for expected 1000 msat fee
+ assert_eq!(chan_1_used_liquidity, Some(501000));
+ assert_eq!(chan_2_used_liquidity, Some(500000));
+ }
+
+ // Now, let's claim the payment. This should result in the used liquidity returning `None`.
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+ {
+ let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
+
+ let node_0_channel_lock = nodes[0].node.channel_state.lock().unwrap();
+ let node_1_channel_lock = nodes[1].node.channel_state.lock().unwrap();
+ let channel_1 = node_0_channel_lock.by_id.get(&chan_1_id).unwrap();
+ let channel_2 = node_1_channel_lock.by_id.get(&chan_2_id).unwrap();
+
+ let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
+ &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
+ &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
+ channel_1.get_short_channel_id().unwrap()
+ );
+ let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
+ &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
+ &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
+ channel_2.get_short_channel_id().unwrap()
+ );
+
+ assert_eq!(chan_1_used_liquidity, None);
+ assert_eq!(chan_2_used_liquidity, None);
+ }
+}
+
+#[test]
+fn test_holding_cell_inflight_htlcs() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+ let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
+ let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
+
+ // Queue up two payments - one will be delivered right away, one immediately goes into the
+ // holding cell as nodes[0] is AwaitingRAA.
+ {
+ nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ }
+
+ let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
+
+ {
+ let channel_lock = nodes[0].node.channel_state.lock().unwrap();
+ let channel = channel_lock.by_id.get(&channel_id).unwrap();
+
+ let used_liquidity = inflight_htlcs.used_liquidity_msat(
+ &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
+ &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
+ channel.get_short_channel_id().unwrap()
+ );
+
+ assert_eq!(used_liquidity, Some(2000000));
+ }
+
+ // Clear pending events so test doesn't throw a "Had excess message on node..." error
+ nodes[0].node.get_and_clear_pending_msg_events();
+}
//! other behavior that exists only on private channels or with a semi-trusted counterparty (eg
//! LSP).
-use crate::chain::{ChannelMonitorUpdateStatus, Watch};
-use crate::chain::channelmonitor::ChannelMonitor;
+use crate::chain::ChannelMonitorUpdateStatus;
use crate::chain::keysinterface::{Recipient, KeysInterface};
-use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, MIN_CLTV_EXPIRY_DELTA, PaymentId};
+use crate::ln::channelmanager::{self, ChannelManager, MIN_CLTV_EXPIRY_DELTA, PaymentId};
use crate::routing::gossip::RoutingFees;
use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop};
use crate::ln::features::ChannelTypeFeatures;
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ChannelUpdate, ErrorAction};
use crate::ln::wire::Encode;
-use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::util::config::UserConfig;
-use crate::util::ser::{Writeable, ReadableArgs};
+use crate::util::ser::Writeable;
use crate::util::test_utils;
use crate::prelude::*;
use crate::ln::functional_test_utils::*;
use bitcoin::blockdata::constants::genesis_block;
-use bitcoin::hash_types::BlockHash;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::network::constants::Network;
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
let nodes_1_serialized = nodes[1].node.encode();
- let mut monitor_a_serialized = test_utils::TestVecWriter(Vec::new());
- let mut monitor_b_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[1], chan_id_1).write(&mut monitor_a_serialized).unwrap();
- get_monitor!(nodes[1], chan_id_2).write(&mut monitor_b_serialized).unwrap();
-
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[1].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
- nodes[1].chain_monitor = &new_chain_monitor;
-
- let mut monitor_a_read = &monitor_a_serialized.0[..];
- let mut monitor_b_read = &monitor_b_serialized.0[..];
- let (_, mut monitor_a) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_a_read, keys_manager).unwrap();
- let (_, mut monitor_b) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_b_read, keys_manager).unwrap();
- assert!(monitor_a_read.is_empty());
- assert!(monitor_b_read.is_empty());
+ let monitor_a_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+ let monitor_b_serialized = get_monitor!(nodes[1], chan_id_2).encode();
no_announce_cfg.accept_forwards_to_priv_channels = true;
-
- let mut nodes_1_read = &nodes_1_serialized[..];
- let (_, nodes_1_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(monitor_a.get_funding_txo().0, &mut monitor_a);
- channel_monitors.insert(monitor_b.get_funding_txo().0, &mut monitor_b);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_1_read, ChannelManagerReadArgs {
- default_config: no_announce_cfg,
- keys_manager,
- fee_estimator: node_cfgs[1].fee_estimator,
- chain_monitor: nodes[1].chain_monitor,
- tx_broadcaster: nodes[1].tx_broadcaster.clone(),
- logger: nodes[1].logger,
- channel_monitors,
- }).unwrap()
- };
- assert!(nodes_1_read.is_empty());
- nodes_1_deserialized = nodes_1_deserialized_tmp;
-
- assert_eq!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a),
- ChannelMonitorUpdateStatus::Completed);
- assert_eq!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b),
- ChannelMonitorUpdateStatus::Completed);
- check_added_monitors!(nodes[1], 2);
- nodes[1].node = &nodes_1_deserialized;
+ reload_node!(nodes[1], no_announce_cfg, &nodes_1_serialized, &[&monitor_a_serialized, &monitor_b_serialized], persister, new_chain_monitor, nodes_1_deserialized);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Functional tests which test for correct behavior across node restarts.
+
+use crate::chain::{ChannelMonitorUpdateStatus, Watch};
+use crate::chain::channelmonitor::ChannelMonitor;
+use crate::chain::transaction::OutPoint;
+use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId};
+use crate::ln::msgs;
+use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
+use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_utils;
+use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use crate::util::ser::{Writeable, ReadableArgs};
+use crate::util::config::UserConfig;
+
+use bitcoin::hash_types::BlockHash;
+
+use crate::prelude::*;
+use core::default::Default;
+use crate::sync::Mutex;
+
+use crate::ln::functional_test_utils::*;
+
+#[test]
+fn test_funding_peer_disconnect() {
+	// Test that we can lock in our funding tx while disconnected, and that the cached
+	// announcement_signatures still produce an identical channel_announcement after a reload.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	// Confirm on nodes[0] only - while disconnected no messages should be generated.
+	confirm_transaction(&nodes[0], &tx);
+	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
+	assert!(events_1.is_empty());
+
+	reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	// Likewise, confirming on nodes[1] while disconnected generates nothing.
+	confirm_transaction(&nodes[1], &tx);
+	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+	assert!(events_2.is_empty());
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+	let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
+
+	// nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
+	let events_3 = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_3.len(), 1);
+	let as_channel_ready = match events_3[0] {
+		MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
+			assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+			msg.clone()
+		},
+		_ => panic!("Unexpected event {:?}", events_3[0]),
+	};
+
+	// nodes[1] received nodes[0]'s channel_ready on the first reconnect above, so it should send
+	// announcement_signatures as well as channel_update.
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
+	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_4.len(), 3);
+	let chan_id;
+	let bs_channel_ready = match events_4[0] {
+		MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+			chan_id = msg.channel_id;
+			msg.clone()
+		},
+		_ => panic!("Unexpected event {:?}", events_4[0]),
+	};
+	let bs_announcement_sigs = match events_4[1] {
+		MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+			msg.clone()
+		},
+		_ => panic!("Unexpected event {:?}", events_4[1]),
+	};
+	match events_4[2] {
+		MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+		},
+		_ => panic!("Unexpected event {:?}", events_4[2]),
+	}
+
+	// Re-deliver nodes[0]'s channel_ready, which nodes[1] can safely ignore. It currently
+	// generates a duplicative private channel_update
+	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+	let events_5 = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_5.len(), 1);
+	match events_5[0] {
+		MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+		},
+		_ => panic!("Unexpected event {:?}", events_5[0]),
+	};
+
+	// When we deliver nodes[1]'s channel_ready, however, nodes[0] will generate its
+	// announcement_signatures.
+	nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_channel_ready);
+	let events_6 = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_6.len(), 1);
+	let as_announcement_sigs = match events_6[0] {
+		MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
+			assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+			msg.clone()
+		},
+		_ => panic!("Unexpected event {:?}", events_6[0]),
+	};
+	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
+	expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+	// When we deliver nodes[1]'s announcement_signatures to nodes[0], nodes[0] should immediately
+	// broadcast the channel announcement globally, as well as re-send its (now-public)
+	// channel_update.
+	nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
+	let events_7 = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_7.len(), 1);
+	let (chan_announcement, as_update) = match events_7[0] {
+		MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+			(msg.clone(), update_msg.clone())
+		},
+		_ => panic!("Unexpected event {:?}", events_7[0]),
+	};
+
+	// Finally, deliver nodes[0]'s announcement_signatures to nodes[1] and make sure it creates the
+	// same channel_announcement.
+	nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs);
+	let events_8 = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_8.len(), 1);
+	let bs_update = match events_8[0] {
+		MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+			assert_eq!(*msg, chan_announcement);
+			update_msg.clone()
+		},
+		_ => panic!("Unexpected event {:?}", events_8[0]),
+	};
+
+	// Provide the channel announcement and public updates to the network graph
+	nodes[0].gossip_sync.handle_channel_announcement(&chan_announcement).unwrap();
+	nodes[0].gossip_sync.handle_channel_update(&bs_update).unwrap();
+	nodes[0].gossip_sync.handle_channel_update(&as_update).unwrap();
+
+	let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
+	let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+
+	// Check that after deserialization and reconnection we can still generate an identical
+	// channel_announcement from the cached signatures.
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+
+	// Reload nodes[0] from its serialized ChannelManager and ChannelMonitor state.
+	reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+}
+
+#[test]
+fn test_no_txn_manager_serialize_deserialize() {
+	// Test that a ChannelManager serialized before the funding transaction is confirmed can be
+	// reloaded, reconnect, and then complete the channel (lock-in, announcement, and a payment).
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	// The channel_id is derived from the funding outpoint (txid, output index 0).
+	let chan_0_monitor_serialized =
+		get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
+	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+	// Neither side should have anything to send after exchanging channel_reestablish - the
+	// funding tx is not yet confirmed for either node.
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+	let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
+	for node in nodes.iter() {
+		assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
+		node.gossip_sync.handle_channel_update(&as_update).unwrap();
+		node.gossip_sync.handle_channel_update(&bs_update).unwrap();
+	}
+
+	send_payment(&nodes[0], &[&nodes[1]], 1000000);
+}
+
+#[test]
+fn test_manager_serialize_deserialize_events() {
+	// This test makes sure the events field in ChannelManager survives de/serialization
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Start creating a channel, but stop right before broadcasting the funding transaction
+	let channel_value = 100000;
+	let push_msat = 10001;
+	let a_flags = channelmanager::provided_init_features();
+	let b_flags = channelmanager::provided_init_features();
+	let node_a = nodes.remove(0);
+	let node_b = nodes.remove(0);
+	node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap();
+	node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
+	node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));
+
+	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, &node_b.node.get_our_node_id(), channel_value, 42);
+
+	node_a.node.funding_transaction_generated(&temporary_channel_id, &node_b.node.get_our_node_id(), tx.clone()).unwrap();
+	check_added_monitors!(node_a, 0);
+
+	// funding_created makes node_b persist its first monitor for this channel.
+	node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
+	{
+		let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
+		assert_eq!(added_monitors.len(), 1);
+		assert_eq!(added_monitors[0].0, funding_output);
+		added_monitors.clear();
+	}
+
+	// funding_signed likewise makes node_a persist its first monitor.
+	let bs_funding_signed = get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id());
+	node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &bs_funding_signed);
+	{
+		let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
+		assert_eq!(added_monitors.len(), 1);
+		assert_eq!(added_monitors[0].0, funding_output);
+		added_monitors.clear();
+	}
+	// Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
+
+	nodes.push(node_a);
+	nodes.push(node_b);
+
+	// Start the de/seriailization process mid-channel creation to check that the channel manager will hold onto events that are serialized
+	let chan_0_monitor_serialized = get_monitor!(nodes[0], bs_funding_signed.channel_id).encode();
+	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	// After deserializing, make sure the funding_transaction is still held by the channel manager
+	let events_4 = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events_4.len(), 0);
+	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
+
+	// Make sure the channel is functioning as though the de/serialization never happened
+	assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Complete the channel lock-in and announcement, then verify it is usable end-to-end.
+	let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+	let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
+	for node in nodes.iter() {
+		assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
+		node.gossip_sync.handle_channel_update(&as_update).unwrap();
+		node.gossip_sync.handle_channel_update(&bs_update).unwrap();
+	}
+
+	send_payment(&nodes[0], &[&nodes[1]], 1000000);
+}
+
+#[test]
+fn test_simple_manager_serialize_deserialize() {
+	// Simple sanity test: reload nodes[0] with two HTLCs pending over a single channel, then
+	// make sure we can still fail one payment and claim the other afterwards.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+	// Route two payments but resolve neither before the reload below.
+	let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+	let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
+	claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
+}
+
+#[test]
+fn test_manager_serialize_deserialize_inconsistent_monitor() {
+	// Test deserializing a ChannelManager with an out-of-date ChannelMonitor
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let logger: test_utils::TestLogger;
+	let fee_estimator: test_utils::TestFeeEstimator;
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 2, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+	let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+
+	// Snapshot all three of nodes[0]'s monitors now; the one for the channel with nodes[3] will
+	// become stale once we route a payment over that channel below.
+	let mut node_0_stale_monitors_serialized = Vec::new();
+	for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] {
+		let mut writer = test_utils::TestVecWriter(Vec::new());
+		get_monitor!(nodes[0], chan_id_iter).write(&mut writer).unwrap();
+		node_0_stale_monitors_serialized.push(writer.0);
+	}
+
+	let (our_payment_preimage, _, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
+
+	// Serialize the ChannelManager here, but the monitor we keep up-to-date
+	let nodes_0_serialized = nodes[0].node.encode();
+
+	route_payment(&nodes[0], &[&nodes[3]], 1000000);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	// Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
+	// nodes[3])
+	let mut node_0_monitors_serialized = Vec::new();
+	for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] {
+		node_0_monitors_serialized.push(get_monitor!(nodes[0], chan_id_iter).encode());
+	}
+
+	logger = test_utils::TestLogger::new();
+	fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
+	persister = test_utils::TestPersister::new();
+	let keys_manager = &chanmon_cfgs[0].keys_manager;
+	new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
+	nodes[0].chain_monitor = &new_chain_monitor;
+
+
+	let mut node_0_stale_monitors = Vec::new();
+	for serialized in node_0_stale_monitors_serialized.iter() {
+		let mut read = &serialized[..];
+		let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
+		assert!(read.is_empty());
+		node_0_stale_monitors.push(monitor);
+	}
+
+	let mut node_0_monitors = Vec::new();
+	for serialized in node_0_monitors_serialized.iter() {
+		let mut read = &serialized[..];
+		let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
+		assert!(read.is_empty());
+		node_0_monitors.push(monitor);
+	}
+
+	// Deserializing with monitors that are *older* than the manager must be rejected.
+	let mut nodes_0_read = &nodes_0_serialized[..];
+	if let Err(msgs::DecodeError::InvalidValue) =
+		<(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+		default_config: UserConfig::default(),
+		keys_manager,
+		fee_estimator: &fee_estimator,
+		chain_monitor: nodes[0].chain_monitor,
+		tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+		logger: &logger,
+		channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
+	}) { } else {
+		panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
+	};
+
+	// Deserializing with up-to-date monitors succeeds, even though the *manager* is now stale
+	// for the channel with nodes[3] (that channel gets force-closed below).
+	let mut nodes_0_read = &nodes_0_serialized[..];
+	let (_, nodes_0_deserialized_tmp) =
+		<(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+		default_config: UserConfig::default(),
+		keys_manager,
+		fee_estimator: &fee_estimator,
+		chain_monitor: nodes[0].chain_monitor,
+		tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+		logger: &logger,
+		channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
+	}).unwrap();
+	nodes_0_deserialized = nodes_0_deserialized_tmp;
+	assert!(nodes_0_read.is_empty());
+
+	{ // Channel close should result in a commitment tx
+		let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(txn.len(), 1);
+		check_spends!(txn[0], funding_tx);
+		assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+	}
+
+	for monitor in node_0_monitors.drain(..) {
+		assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+			ChannelMonitorUpdateStatus::Completed);
+		check_added_monitors!(nodes[0], 1);
+	}
+	nodes[0].node = &nodes_0_deserialized;
+	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+
+	// nodes[1] and nodes[2] have no lost state with nodes[0]...
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	//... and we can even still claim the payment!
+	claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
+
+	// nodes[3]'s channel was dropped by the reload, so reestablishing it must yield exactly one
+	// error message for that channel_id.
+	nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+	let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
+	nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
+	let mut found_err = false;
+	for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
+		if let MessageSendEvent::HandleError { ref action, .. } = msg_event {
+			match action {
+				&ErrorAction::SendErrorMessage { ref msg } => {
+					assert_eq!(msg.channel_id, channel_id);
+					assert!(!found_err);
+					found_err = true;
+				},
+				_ => panic!("Unexpected event!"),
+			}
+		}
+	}
+	assert!(found_err);
+}
+
+fn do_test_data_loss_protect(reconnect_panicing: bool) {
+ // When we get a data_loss_protect proving we're behind, we immediately panic as the
+ // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
+ // panic message informs the user they should force-close without broadcasting, which is tested
+ // if `reconnect_panicing` is not set.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ // We broadcast during Drop because chanmon is out of sync with chanmgr, which would cause a panic
+ // during signing due to revoked tx
+ chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
+ let persister;
+ let new_chain_monitor;
+ let nodes_0_deserialized;
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+
+ // Cache node A state before any channel update
+ let previous_node_state = nodes[0].node.encode();
+ let previous_chain_monitor_state = get_monitor!(nodes[0], chan.2).encode();
+
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
+
+ if reconnect_panicing {
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+
+ // Check we close channel detecting A is fallen-behind
+ // Check that we sent the warning message when we detected that A has fallen behind,
+ // and give the possibility for A to recover from the warning.
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+ let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+ assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
+
+ {
+ let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ // The node B should not broadcast the transaction to force close the channel!
+ assert!(node_txn.is_empty());
+ }
+
+ let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ // Check A panics upon seeing proof it has fallen behind.
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
+ return; // By this point we should have panic'ed!
+ }
+
+ nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+ {
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 0);
+ }
+
+ for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+ if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+ } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
+ match action {
+ &ErrorAction::SendErrorMessage { ref msg } => {
+ assert_eq!(msg.data, "Channel force-closed");
+ },
+ _ => panic!("Unexpected event!"),
+ }
+ } else {
+ panic!("Unexpected event {:?}", msg)
+ }
+ }
+
+ // after the warning message sent by B, we should not able to
+ // use the channel, or reconnect with success to the channel.
+ assert!(nodes[0].node.list_usable_channels().is_empty());
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+ let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+ let mut err_msgs_0 = Vec::with_capacity(1);
+ for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+ if let MessageSendEvent::HandleError { ref action, .. } = msg {
+ match action {
+ &ErrorAction::SendErrorMessage { ref msg } => {
+ assert_eq!(msg.data, "Failed to find corresponding channel");
+ err_msgs_0.push(msg.clone());
+ },
+ _ => panic!("Unexpected event!"),
+ }
+ } else {
+ panic!("Unexpected event!");
+ }
+ }
+ assert_eq!(err_msgs_0.len(), 1);
+ nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+ assert!(nodes[1].node.list_usable_channels().is_empty());
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
+ check_closed_broadcast!(nodes[1], false);
+}
+
+#[test]
+#[should_panic]
+fn test_data_loss_protect_showing_stale_state_panics() {
+ do_test_data_loss_protect(true);
+}
+
+#[test]
+fn test_force_close_without_broadcast() {
+ do_test_data_loss_protect(false);
+}
+
+#[test]
+fn test_forwardable_regen() {
+ // Tests that if we reload a ChannelManager while forwards are pending we will regenerate the
+ // PendingHTLCsForwardable event automatically, ensuring we don't forget to forward/receive
+ // HTLCs.
+ // We test it for both payment receipt and payment forwarding.
+
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+ // First send a payment to nodes[1]
+ let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+
+ // Next send a payment which is forwarded by nodes[1]
+ let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000);
+ nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+ // There is already a PendingHTLCsForwardable event "pending" so another one will not be
+ // generated
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+ let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+ let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+ reload_node!(nodes[1], nodes[1].node.encode(), &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ // Note that nodes[1] and nodes[2] resend their channel_ready here since they haven't updated
+ // the commitment state.
+ reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000);
+ check_added_monitors!(nodes[1], 1);
+
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
+}
+
+fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
+ // Test what happens if a node receives an MPP payment, claims it, but crashes before
+ // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
+ // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
+ // have the PaymentReceived event, (b) have one (or two) channel(s) that goes on chain with the
+ // HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does
+ // not have the preimage tied to the still-pending HTLC.
+ //
+ // To get to the correct state, on startup we should propagate the preimage to the
+ // still-off-chain channel, claiming the HTLC as soon as the peer connects, with the monitor
+ // receiving the preimage without a state update.
+ //
+ // Further, we should generate a `PaymentClaimed` event to inform the user that the payment was
+ // definitely claimed.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_3_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+ let chan_id_persisted = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+ let chan_id_not_persisted = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+ // Create an MPP route for 15k sats, more than the default htlc-max of 10%
+ let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
+ assert_eq!(route.paths.len(), 2);
+ route.paths.sort_by(|path_a, _| {
+ // Sort the path so that the path through nodes[1] comes first
+ if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+ core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+ });
+
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+
+ // Send the payment through to nodes[3] *without* clearing the PaymentReceived event
+ let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(send_events.len(), 2);
+ do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
+ do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
+
+ // Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
+ // monitors and ChannelManager, for use later, if we don't want to persist both monitors.
+ let mut original_monitor = test_utils::TestVecWriter(Vec::new());
+ if !persist_both_monitors {
+ for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if outpoint.to_channel_id() == chan_id_not_persisted {
+ assert!(original_monitor.0.is_empty());
+ nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+ }
+ }
+ }
+
+ let original_manager = nodes[3].node.encode();
+
+ expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
+
+ nodes[3].node.claim_funds(payment_preimage);
+ check_added_monitors!(nodes[3], 2);
+ expect_payment_claimed!(nodes[3], payment_hash, 15_000_000);
+
+ // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
+ // crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
+ // with the old ChannelManager.
+ let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
+ for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if outpoint.to_channel_id() == chan_id_persisted {
+ assert!(updated_monitor.0.is_empty());
+ nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
+ }
+ }
+ // If `persist_both_monitors` is set, get the second monitor here as well
+ if persist_both_monitors {
+ for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if outpoint.to_channel_id() == chan_id_not_persisted {
+ assert!(original_monitor.0.is_empty());
+ nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+ }
+ }
+ }
+
+ // Now restart nodes[3].
+ reload_node!(nodes[3], original_manager, &[&updated_monitor.0, &original_monitor.0], persister, new_chain_monitor, nodes_3_deserialized);
+
+ // On startup the preimage should have been copied into the non-persisted monitor:
+ assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash));
+ assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));
+
+ nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+ nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+
+ // During deserialization, we should have closed one channel and broadcast its latest
+ // commitment transaction. We should also still have the original PaymentReceived event we
+ // never finished processing.
+ let events = nodes[3].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
+ if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
+ if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
+ if persist_both_monitors {
+ if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
+ }
+
+ // On restart, we should also get a duplicate PaymentClaimed event as we persisted the
+ // ChannelManager prior to handling the original one.
+ if let Event::PaymentClaimed { payment_hash: our_payment_hash, amount_msat: 15_000_000, .. } =
+ events[if persist_both_monitors { 3 } else { 2 }]
+ {
+ assert_eq!(payment_hash, our_payment_hash);
+ } else { panic!(); }
+
+ assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 });
+ if !persist_both_monitors {
+ // If one of the two channels is still live, reveal the payment preimage over it.
+
+ nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
+ nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
+
+ nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
+ get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
+ assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[3].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &reestablish_2[0]);
+
+ // Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
+ // claim should fly.
+ let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+ check_added_monitors!(nodes[3], 1);
+ assert_eq!(ds_msgs.len(), 2);
+ if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
+
+ let cs_updates = match ds_msgs[1] {
+ MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
+ nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ check_added_monitors!(nodes[2], 1);
+ let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+ expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+ commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+ cs_updates
+ }
+ _ => panic!(),
+ };
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
+ expect_payment_sent!(nodes[0], payment_preimage);
+ }
+}
+
+#[test]
+fn test_partial_claim_before_restart() {
+ do_test_partial_claim_before_restart(false);
+ do_test_partial_claim_before_restart(true);
+}
//! Further functional tests which test blockchain reorganizations.
-use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
+use crate::chain::channelmonitor::ANTI_REORG_DELAY;
use crate::chain::transaction::OutPoint;
-use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
-use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs};
+use crate::chain::Confirm;
+use crate::ln::channelmanager::{self, ChannelManager};
use crate::ln::msgs::ChannelMessageHandler;
-use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::util::test_utils;
-use crate::util::ser::{ReadableArgs, Writeable};
+use crate::util::ser::Writeable;
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::opcodes;
-use bitcoin::hash_types::BlockHash;
use bitcoin::secp256k1::Secp256k1;
use crate::prelude::*;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
*nodes[0].connect_style.borrow_mut() = connect_style;
+ let chan_conf_height = core::cmp::max(nodes[0].best_block_info().1 + 1, nodes[1].best_block_info().1 + 1);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
let channel_state = nodes[0].node.channel_state.lock().unwrap();
assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 2);
mem::drop(channel_state);
+ assert_eq!(nodes[0].node.list_channels()[0].confirmations, Some(10));
+ assert_eq!(nodes[1].node.list_channels()[0].confirmations, Some(10));
+
if !reorg_after_reload {
if use_funding_unconfirmed {
let relevant_txids = nodes[0].node.get_relevant_txids();
- assert_eq!(&relevant_txids[..], &[chan.3.txid()]);
- nodes[0].node.transaction_unconfirmed(&relevant_txids[0]);
+ assert_eq!(relevant_txids.len(), 1);
+ let block_hash_opt = relevant_txids[0].1;
+ let expected_hash = nodes[0].get_block_header(chan_conf_height).block_hash();
+ assert_eq!(block_hash_opt, Some(expected_hash));
+ let txid = relevant_txids[0].0;
+ assert_eq!(txid, chan.3.txid());
+ nodes[0].node.transaction_unconfirmed(&txid);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
} else if connect_style == ConnectStyle::FullBlockViaListen {
disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+ assert_eq!(nodes[0].node.list_channels()[0].confirmations, Some(1));
disconnect_blocks(&nodes[0], 1);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
} else {
disconnect_all_blocks(&nodes[0]);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
}
+
+ let relevant_txids = nodes[0].node.get_relevant_txids();
+ assert_eq!(relevant_txids.len(), 0);
+
handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
check_added_monitors!(nodes[1], 1);
{
// the Channel object from the ChannelManager, but still having a monitor event pending for
// it when we go to deserialize, and then use the ChannelManager.
let nodes_0_serialized = nodes[0].node.encode();
- let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[0], chan.2).write(&mut chan_0_monitor_serialized).unwrap();
-
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[0].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
- nodes[0].chain_monitor = &new_chain_monitor;
- let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut chan_0_monitor_read, keys_manager).unwrap();
- assert!(chan_0_monitor_read.is_empty());
-
- let mut nodes_0_read = &nodes_0_serialized[..];
- nodes_0_deserialized = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster,
- &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(
- &mut nodes_0_read, ChannelManagerReadArgs {
- default_config: *nodes[0].node.get_current_default_configuration(),
- keys_manager,
- fee_estimator: node_cfgs[0].fee_estimator,
- chain_monitor: nodes[0].chain_monitor,
- tx_broadcaster: nodes[0].tx_broadcaster.clone(),
- logger: nodes[0].logger,
- channel_monitors,
- }).unwrap().1
- };
- nodes[0].node = &nodes_0_deserialized;
- assert!(nodes_0_read.is_empty());
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
+
+ reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
if !reorg_after_reload {
// If the channel is already closed when we reload the node, we'll broadcast a closing
// transaction via the ChannelMonitor which is missing a corresponding channel.
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
}
-
- assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor),
- ChannelMonitorUpdateStatus::Completed);
- check_added_monitors!(nodes[0], 1);
}
if reorg_after_reload {
if use_funding_unconfirmed {
let relevant_txids = nodes[0].node.get_relevant_txids();
- assert_eq!(&relevant_txids[..], &[chan.3.txid()]);
- nodes[0].node.transaction_unconfirmed(&relevant_txids[0]);
+ assert_eq!(relevant_txids.len(), 1);
+ let block_hash_opt = relevant_txids[0].1;
+ let expected_hash = nodes[0].get_block_header(chan_conf_height).block_hash();
+ assert_eq!(block_hash_opt, Some(expected_hash));
+ let txid = relevant_txids[0].0;
+ assert_eq!(txid, chan.3.txid());
+ nodes[0].node.transaction_unconfirmed(&txid);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
} else if connect_style == ConnectStyle::FullBlockViaListen {
disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
assert_eq!(nodes[0].node.list_channels().len(), 1);
+ assert_eq!(nodes[0].node.list_channels()[0].confirmations, Some(1));
disconnect_blocks(&nodes[0], 1);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
} else {
disconnect_all_blocks(&nodes[0]);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
}
+
+ let relevant_txids = nodes[0].node.get_relevant_txids();
+ assert_eq!(relevant_txids.len(), 0);
+
handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
check_added_monitors!(nodes[1], 1);
{
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Implementation of Lightning Offers
+//! ([BOLT 12](https://github.com/lightning/bolts/blob/master/12-offer-encoding.md)).
+//!
+//! Offers are a flexible protocol for Lightning payments.
+
+pub mod offer;
+pub mod parse;
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Data structures and encoding for `offer` messages.
+//!
+//! An [`Offer`] represents an "offer to be paid." It is typically constructed by a merchant and
+//! published as a QR code to be scanned by a customer. The customer uses the offer to request an
+//! invoice from the merchant to be paid.
+//!
+//! ```ignore
+//! extern crate bitcoin;
+//! extern crate core;
+//! extern crate lightning;
+//!
+//! use core::convert::TryFrom;
+//! use core::num::NonZeroU64;
+//! use core::time::Duration;
+//!
+//! use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey};
+//! use lightning::offers::offer::{Offer, OfferBuilder, Quantity};
+//! use lightning::offers::parse::ParseError;
+//! use lightning::util::ser::{Readable, Writeable};
+//!
+//! # use lightning::onion_message::BlindedPath;
+//! # #[cfg(feature = "std")]
+//! # use std::time::SystemTime;
+//! #
+//! # fn create_blinded_path() -> BlindedPath { unimplemented!() }
+//! # fn create_another_blinded_path() -> BlindedPath { unimplemented!() }
+//! #
+//! # #[cfg(feature = "std")]
+//! # fn build() -> Result<(), ParseError> {
+//! let secp_ctx = Secp256k1::new();
+//! let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+//! let pubkey = PublicKey::from(keys);
+//!
+//! let expiration = SystemTime::now() + Duration::from_secs(24 * 60 * 60);
+//! let offer = OfferBuilder::new("coffee, large".to_string(), pubkey)
+//! .amount_msats(20_000)
+//! .supported_quantity(Quantity::Unbounded)
+//! .absolute_expiry(expiration.duration_since(SystemTime::UNIX_EPOCH).unwrap())
+//! .issuer("Foo Bar".to_string())
+//! .path(create_blinded_path())
+//! .path(create_another_blinded_path())
+//! .build()?;
+//!
+//! // Encode as a bech32 string for use in a QR code.
+//! let encoded_offer = offer.to_string();
+//!
+//! // Parse from a bech32 string after scanning from a QR code.
+//! let offer = encoded_offer.parse::<Offer>()?;
+//!
+//! // Encode offer as raw bytes.
+//! let mut bytes = Vec::new();
+//! offer.write(&mut bytes).unwrap();
+//!
+//! // Decode raw bytes into an offer.
+//! let offer = Offer::try_from(bytes)?;
+//! # Ok(())
+//! # }
+//! ```
+
+use bitcoin::blockdata::constants::ChainHash;
+use bitcoin::network::constants::Network;
+use bitcoin::secp256k1::PublicKey;
+use core::convert::TryFrom;
+use core::num::NonZeroU64;
+use core::str::FromStr;
+use core::time::Duration;
+use crate::io;
+use crate::ln::features::OfferFeatures;
+use crate::ln::msgs::MAX_VALUE_MSAT;
+use crate::offers::parse::{Bech32Encode, ParseError, ParsedMessage, SemanticError};
+use crate::onion_message::BlindedPath;
+use crate::util::ser::{HighZeroBytesDroppedBigSize, WithoutLength, Writeable, Writer};
+use crate::util::string::PrintableString;
+
+use crate::prelude::*;
+
+#[cfg(feature = "std")]
+use std::time::SystemTime;
+
+/// Builds an [`Offer`] for the "offer to be paid" flow.
+///
+/// See [module-level documentation] for usage.
+///
+/// [module-level documentation]: self
+pub struct OfferBuilder {
+ offer: OfferContents,
+}
+
+impl OfferBuilder {
+ /// Creates a new builder for an offer setting the [`Offer::description`] and using the
+ /// [`Offer::signing_pubkey`] for signing invoices. The associated secret key must be remembered
+ /// while the offer is valid.
+ ///
+ /// Use a different pubkey per offer to avoid correlating offers.
+ pub fn new(description: String, signing_pubkey: PublicKey) -> Self {
+ let offer = OfferContents {
+ chains: None, metadata: None, amount: None, description,
+ features: OfferFeatures::empty(), absolute_expiry: None, issuer: None, paths: None,
+ supported_quantity: Quantity::one(), signing_pubkey: Some(signing_pubkey),
+ };
+ OfferBuilder { offer }
+ }
+
+ /// Adds the chain hash of the given [`Network`] to [`Offer::chains`]. If not called,
+ /// the chain hash of [`Network::Bitcoin`] is assumed to be the only one supported.
+ ///
+ /// See [`Offer::chains`] on how this relates to the payment currency.
+ ///
+ /// Successive calls to this method will add another chain hash.
+ pub fn chain(mut self, network: Network) -> Self {
+ let chains = self.offer.chains.get_or_insert_with(Vec::new);
+ let chain = ChainHash::using_genesis_block(network);
+ if !chains.contains(&chain) {
+ chains.push(chain);
+ }
+
+ self
+ }
+
+ /// Sets the [`Offer::metadata`].
+ ///
+ /// Successive calls to this method will override the previous setting.
+ pub fn metadata(mut self, metadata: Vec<u8>) -> Self {
+ self.offer.metadata = Some(metadata);
+ self
+ }
+
+ /// Sets the [`Offer::amount`] as an [`Amount::Bitcoin`].
+ ///
+ /// Successive calls to this method will override the previous setting.
+ pub fn amount_msats(mut self, amount_msats: u64) -> Self {
+ self.amount(Amount::Bitcoin { amount_msats })
+ }
+
+ /// Sets the [`Offer::amount`].
+ ///
+ /// Successive calls to this method will override the previous setting.
+ fn amount(mut self, amount: Amount) -> Self {
+ self.offer.amount = Some(amount);
+ self
+ }
+
+ /// Sets the [`Offer::features`].
+ ///
+ /// Successive calls to this method will override the previous setting.
+ #[cfg(test)]
+ pub fn features(mut self, features: OfferFeatures) -> Self {
+ self.offer.features = features;
+ self
+ }
+
+ /// Sets the [`Offer::absolute_expiry`] as seconds since the Unix epoch. Any expiry that has
+ /// already passed is valid and can be checked for using [`Offer::is_expired`].
+ ///
+ /// Successive calls to this method will override the previous setting.
+ pub fn absolute_expiry(mut self, absolute_expiry: Duration) -> Self {
+ self.offer.absolute_expiry = Some(absolute_expiry);
+ self
+ }
+
+ /// Sets the [`Offer::issuer`].
+ ///
+ /// Successive calls to this method will override the previous setting.
+ pub fn issuer(mut self, issuer: String) -> Self {
+ self.offer.issuer = Some(issuer);
+ self
+ }
+
+ /// Adds a blinded path to [`Offer::paths`]. Must include at least one path if only connected by
+ /// private channels or if [`Offer::signing_pubkey`] is not a public node id.
+ ///
+ /// Successive calls to this method will add another blinded path. Caller is responsible for not
+ /// adding duplicate paths.
+ pub fn path(mut self, path: BlindedPath) -> Self {
+ self.offer.paths.get_or_insert_with(Vec::new).push(path);
+ self
+ }
+
+ /// Sets the quantity of items for [`Offer::supported_quantity`]. If not called, defaults to
+ /// [`Quantity::one`].
+ ///
+ /// Successive calls to this method will override the previous setting.
+ pub fn supported_quantity(mut self, quantity: Quantity) -> Self {
+ self.offer.supported_quantity = quantity;
+ self
+ }
+
+ /// Builds an [`Offer`] from the builder's settings.
+ pub fn build(mut self) -> Result<Offer, SemanticError> {
+ match self.offer.amount {
+ Some(Amount::Bitcoin { amount_msats }) => {
+ if amount_msats > MAX_VALUE_MSAT {
+ return Err(SemanticError::InvalidAmount);
+ }
+ },
+ Some(Amount::Currency { .. }) => return Err(SemanticError::UnsupportedCurrency),
+ None => {},
+ }
+
+ if let Some(chains) = &self.offer.chains {
+ if chains.len() == 1 && chains[0] == self.offer.implied_chain() {
+ self.offer.chains = None;
+ }
+ }
+
+ let mut bytes = Vec::new();
+ self.offer.write(&mut bytes).unwrap();
+
+ Ok(Offer {
+ bytes,
+ contents: self.offer,
+ })
+ }
+}
+
+/// An `Offer` is a potentially long-lived proposal for payment of a good or service.
+///
+/// An offer is a precursor to an `InvoiceRequest`. A merchant publishes an offer from which a
+/// customer may request an `Invoice` for a specific quantity and using an amount sufficient to
+/// cover that quantity (i.e., at least `quantity * amount`). See [`Offer::amount`].
+///
+/// Offers may be denominated in currency other than bitcoin but are ultimately paid using the
+/// latter.
+///
+/// Through the use of [`BlindedPath`]s, offers provide recipient privacy.
+#[derive(Clone, Debug)]
+pub struct Offer {
+    // The serialized offer. Needed when creating an `InvoiceRequest` if the offer contains unknown
+    // fields.
+    bytes: Vec<u8>,
+    // Parsed contents; set together with `bytes` on build/parse so the two stay in sync.
+    contents: OfferContents,
+}
+
+/// The contents of an [`Offer`], which may be shared with an `InvoiceRequest` or an `Invoice`.
+#[derive(Clone, Debug)]
+pub(crate) struct OfferContents {
+    // `None` implies the bitcoin mainnet chain; see `implied_chain`.
+    chains: Option<Vec<ChainHash>>,
+    // Opaque originator-set bytes, reflected back in `invoice_request` messages.
+    metadata: Option<Vec<u8>>,
+    // Minimum amount for a single item, if any.
+    amount: Option<Amount>,
+    description: String,
+    features: OfferFeatures,
+    // Expiry as a duration since the Unix epoch; `None` means the offer never expires.
+    absolute_expiry: Option<Duration>,
+    issuer: Option<String>,
+    paths: Option<Vec<BlindedPath>>,
+    supported_quantity: Quantity,
+    // Expected to be `Some` for built and successfully parsed offers; `Offer::signing_pubkey`
+    // unwraps it.
+    signing_pubkey: Option<PublicKey>,
+}
+
+impl Offer {
+    // TODO: Return a slice once ChainHash has constants.
+    // - https://github.com/rust-bitcoin/rust-bitcoin/pull/1283
+    // - https://github.com/rust-bitcoin/rust-bitcoin/pull/1286
+    /// The chains that may be used when paying a requested invoice (e.g., bitcoin mainnet).
+    /// Payments must be denominated in units of the minimal lightning-payable unit (e.g., msats)
+    /// for the selected chain.
+    pub fn chains(&self) -> Vec<ChainHash> {
+        // An absent chains TLV implies mainnet.
+        self.contents.chains
+            .as_ref()
+            .cloned()
+            .unwrap_or_else(|| vec![self.contents.implied_chain()])
+    }
+
+    // TODO: Link to corresponding method in `InvoiceRequest`.
+    /// Opaque bytes set by the originator. Useful for authentication and validating fields since it
+    /// is reflected in `invoice_request` messages along with all the other fields from the `offer`.
+    pub fn metadata(&self) -> Option<&Vec<u8>> {
+        self.contents.metadata.as_ref()
+    }
+
+    /// The minimum amount required for a successful payment of a single item.
+    pub fn amount(&self) -> Option<&Amount> {
+        self.contents.amount.as_ref()
+    }
+
+    /// A complete description of the purpose of the payment. Intended to be displayed to the user
+    /// but with the caveat that it has not been verified in any way.
+    pub fn description(&self) -> PrintableString {
+        PrintableString(&self.contents.description)
+    }
+
+    /// Features pertaining to the offer.
+    pub fn features(&self) -> &OfferFeatures {
+        &self.contents.features
+    }
+
+    /// Duration since the Unix epoch when an invoice should no longer be requested.
+    ///
+    /// If `None`, the offer does not expire.
+    pub fn absolute_expiry(&self) -> Option<Duration> {
+        self.contents.absolute_expiry
+    }
+
+    /// Whether the offer has expired.
+    #[cfg(feature = "std")]
+    pub fn is_expired(&self) -> bool {
+        match self.absolute_expiry() {
+            // A clock set before the Unix epoch yields `Err`; treat that as not expired.
+            Some(seconds_from_epoch) => match SystemTime::UNIX_EPOCH.elapsed() {
+                Ok(elapsed) => elapsed > seconds_from_epoch,
+                Err(_) => false,
+            },
+            None => false,
+        }
+    }
+
+    /// The issuer of the offer, possibly beginning with `user@domain` or `domain`. Intended to be
+    /// displayed to the user but with the caveat that it has not been verified in any way.
+    pub fn issuer(&self) -> Option<PrintableString> {
+        self.contents.issuer.as_ref().map(|issuer| PrintableString(issuer.as_str()))
+    }
+
+    /// Paths to the recipient originating from publicly reachable nodes. Blinded paths provide
+    /// recipient privacy by obfuscating its node id.
+    pub fn paths(&self) -> &[BlindedPath] {
+        self.contents.paths.as_ref().map(|paths| paths.as_slice()).unwrap_or(&[])
+    }
+
+    /// The quantity of items supported.
+    pub fn supported_quantity(&self) -> Quantity {
+        self.contents.supported_quantity()
+    }
+
+    /// The public key used by the recipient to sign invoices.
+    pub fn signing_pubkey(&self) -> PublicKey {
+        // Parsing rejects offers without a node id (`SemanticError::MissingSigningPubkey`), so
+        // this is presumably always `Some` outside of tests — TODO confirm for all constructors.
+        self.contents.signing_pubkey.unwrap()
+    }
+
+    #[cfg(test)]
+    fn as_tlv_stream(&self) -> OfferTlvStreamRef {
+        self.contents.as_tlv_stream()
+    }
+}
+
+impl AsRef<[u8]> for Offer {
+    fn as_ref(&self) -> &[u8] {
+        // Exposes the serialized TLV bytes, e.g., for bech32 encoding.
+        &self.bytes
+    }
+}
+
+impl OfferContents {
+    // When no chains TLV record is present, an offer implicitly applies to mainnet.
+    pub fn implied_chain(&self) -> ChainHash {
+        ChainHash::using_genesis_block(Network::Bitcoin)
+    }
+
+    pub fn supported_quantity(&self) -> Quantity {
+        self.supported_quantity
+    }
+
+    // Borrowing view of the contents as TLV records, for serialization.
+    fn as_tlv_stream(&self) -> OfferTlvStreamRef {
+        // Split `amount` into the separate currency and amount TLV records.
+        let (currency, amount) = match &self.amount {
+            None => (None, None),
+            Some(Amount::Bitcoin { amount_msats }) => (None, Some(*amount_msats)),
+            Some(Amount::Currency { iso4217_code, amount }) => (
+                Some(iso4217_code), Some(*amount)
+            ),
+        };
+
+        // Omit the features record entirely when no feature bits are set.
+        let features = {
+            if self.features == OfferFeatures::empty() { None } else { Some(&self.features) }
+        };
+
+        OfferTlvStreamRef {
+            chains: self.chains.as_ref(),
+            metadata: self.metadata.as_ref(),
+            currency,
+            amount,
+            description: Some(&self.description),
+            features,
+            absolute_expiry: self.absolute_expiry.map(|duration| duration.as_secs()),
+            paths: self.paths.as_ref(),
+            issuer: self.issuer.as_ref(),
+            quantity_max: self.supported_quantity.to_tlv_record(),
+            node_id: self.signing_pubkey.as_ref(),
+        }
+    }
+}
+
+impl Writeable for Offer {
+    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+        // Write the originally serialized bytes so any unknown TLV records are preserved.
+        WithoutLength(&self.bytes).write(writer)
+    }
+}
+
+impl Writeable for OfferContents {
+    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+        self.as_tlv_stream().write(writer)
+    }
+}
+
+/// The minimum amount required for an item in an [`Offer`], denominated in either bitcoin or
+/// another currency.
+#[derive(Clone, Debug, PartialEq)]
+pub enum Amount {
+    /// An amount of bitcoin.
+    Bitcoin {
+        /// The amount in millisatoshi.
+        amount_msats: u64,
+    },
+    /// An amount of currency specified using ISO 4217.
+    Currency {
+        /// The currency that the amount is denominated in.
+        iso4217_code: CurrencyCode,
+        /// The amount in the currency unit adjusted by the ISO 4217 exponent (e.g., USD cents).
+        amount: u64,
+    },
+}
+
+/// An ISO 4217 three-letter currency code (e.g., USD).
+pub type CurrencyCode = [u8; 3];
+
+/// Quantity of items supported by an [`Offer`].
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum Quantity {
+    /// Up to a specific number of items (inclusive).
+    Bounded(NonZeroU64),
+    /// One or more items.
+    Unbounded,
+}
+
+impl Quantity {
+    /// The default quantity of one.
+    pub fn one() -> Self {
+        // `NonZeroU64::new(1)` is trivially non-zero, so unwrap cannot fail.
+        Quantity::Bounded(NonZeroU64::new(1).unwrap())
+    }
+
+    // Encodes the quantity for the `quantity_max` TLV record: omitted for a quantity of one,
+    // `0` for unbounded, and the bound itself otherwise.
+    fn to_tlv_record(&self) -> Option<u64> {
+        match self {
+            Quantity::Bounded(n) => {
+                let n = n.get();
+                if n == 1 { None } else { Some(n) }
+            },
+            Quantity::Unbounded => Some(0),
+        }
+    }
+}
+
+// TLV records for an `offer` message, using even type numbers from the 1..80 range reserved
+// for offers.
+tlv_stream!(OfferTlvStream, OfferTlvStreamRef, 1..80, {
+    (2, chains: (Vec<ChainHash>, WithoutLength)),
+    (4, metadata: (Vec<u8>, WithoutLength)),
+    (6, currency: CurrencyCode),
+    (8, amount: (u64, HighZeroBytesDroppedBigSize)),
+    (10, description: (String, WithoutLength)),
+    (12, features: OfferFeatures),
+    (14, absolute_expiry: (u64, HighZeroBytesDroppedBigSize)),
+    (16, paths: (Vec<BlindedPath>, WithoutLength)),
+    (18, issuer: (String, WithoutLength)),
+    (20, quantity_max: (u64, HighZeroBytesDroppedBigSize)),
+    (22, node_id: PublicKey),
+});
+
+impl Bech32Encode for Offer {
+    // Offers are bech32-encoded (without a checksum) with the `lno` human-readable part.
+    const BECH32_HRP: &'static str = "lno";
+}
+
+impl FromStr for Offer {
+    type Err = ParseError;
+
+    fn from_str(s: &str) -> Result<Self, <Self as FromStr>::Err> {
+        Self::from_bech32_str(s)
+    }
+}
+
+impl TryFrom<Vec<u8>> for Offer {
+    type Error = ParseError;
+
+    fn try_from(bytes: Vec<u8>) -> Result<Self, Self::Error> {
+        // Parse the TLV stream while retaining ownership of the raw bytes so unknown records
+        // can be re-serialized later.
+        let offer = ParsedMessage::<OfferTlvStream>::try_from(bytes)?;
+        let ParsedMessage { bytes, tlv_stream } = offer;
+        let contents = OfferContents::try_from(tlv_stream)?;
+        Ok(Offer { bytes, contents })
+    }
+}
+
+impl TryFrom<OfferTlvStream> for OfferContents {
+    type Error = SemanticError;
+
+    // Applies the offer's semantic checks on top of the syntactically valid TLV stream.
+    fn try_from(tlv_stream: OfferTlvStream) -> Result<Self, Self::Error> {
+        let OfferTlvStream {
+            chains, metadata, currency, amount, description, features, absolute_expiry, paths,
+            issuer, quantity_max, node_id,
+        } = tlv_stream;
+
+        // Recombine the separate currency/amount TLV records into an `Amount`.
+        let amount = match (currency, amount) {
+            (None, None) => None,
+            (None, Some(amount_msats)) if amount_msats > MAX_VALUE_MSAT => {
+                return Err(SemanticError::InvalidAmount);
+            },
+            (None, Some(amount_msats)) => Some(Amount::Bitcoin { amount_msats }),
+            // A currency code without an amount is meaningless.
+            (Some(_), None) => return Err(SemanticError::MissingAmount),
+            (Some(iso4217_code), Some(amount)) => Some(Amount::Currency { iso4217_code, amount }),
+        };
+
+        let description = match description {
+            None => return Err(SemanticError::MissingDescription),
+            Some(description) => description,
+        };
+
+        let features = features.unwrap_or_else(OfferFeatures::empty);
+
+        let absolute_expiry = absolute_expiry
+            .map(|seconds_from_epoch| Duration::from_secs(seconds_from_epoch));
+
+        // An explicit `quantity_max` of 1 is invalid: a quantity of one is encoded by omitting
+        // the record entirely (see `Quantity::to_tlv_record`).
+        let supported_quantity = match quantity_max {
+            None => Quantity::one(),
+            Some(0) => Quantity::Unbounded,
+            Some(1) => return Err(SemanticError::InvalidQuantity),
+            Some(n) => Quantity::Bounded(NonZeroU64::new(n).unwrap()),
+        };
+
+        if node_id.is_none() {
+            return Err(SemanticError::MissingSigningPubkey);
+        }
+
+        Ok(OfferContents {
+            chains, metadata, amount, description, features, absolute_expiry, issuer, paths,
+            supported_quantity, signing_pubkey: node_id,
+        })
+    }
+}
+
+impl core::fmt::Display for Offer {
+    // Displays the offer in its user-facing `lno...` bech32 form.
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
+        self.fmt_bech32_str(f)
+    }
+}
+
+// Unit tests covering `OfferBuilder` defaults and setters, TLV record encoding, and
+// parse-time semantic checks.
+#[cfg(test)]
+mod tests {
+    use super::{Amount, Offer, OfferBuilder, Quantity};
+
+    use bitcoin::blockdata::constants::ChainHash;
+    use bitcoin::network::constants::Network;
+    use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+    use core::convert::TryFrom;
+    use core::num::NonZeroU64;
+    use core::time::Duration;
+    use crate::ln::features::OfferFeatures;
+    use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
+    use crate::offers::parse::{ParseError, SemanticError};
+    use crate::onion_message::{BlindedHop, BlindedPath};
+    use crate::util::ser::{BigSize, Writeable};
+    use crate::util::string::PrintableString;
+
+    // Deterministic test pubkey derived from a repeated-byte secret key.
+    fn pubkey(byte: u8) -> PublicKey {
+        let secp_ctx = Secp256k1::new();
+        PublicKey::from_secret_key(&secp_ctx, &privkey(byte))
+    }
+
+    fn privkey(byte: u8) -> SecretKey {
+        SecretKey::from_slice(&[byte; 32]).unwrap()
+    }
+
+    #[test]
+    fn builds_offer_with_defaults() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42)).build().unwrap();
+        let tlv_stream = offer.as_tlv_stream();
+        let mut buffer = Vec::new();
+        offer.write(&mut buffer).unwrap();
+
+        assert_eq!(offer.bytes, buffer.as_slice());
+        assert_eq!(offer.chains(), vec![ChainHash::using_genesis_block(Network::Bitcoin)]);
+        assert_eq!(offer.metadata(), None);
+        assert_eq!(offer.amount(), None);
+        assert_eq!(offer.description(), PrintableString("foo"));
+        assert_eq!(offer.features(), &OfferFeatures::empty());
+        assert_eq!(offer.absolute_expiry(), None);
+        #[cfg(feature = "std")]
+        assert!(!offer.is_expired());
+        assert_eq!(offer.paths(), &[]);
+        assert_eq!(offer.issuer(), None);
+        assert_eq!(offer.supported_quantity(), Quantity::one());
+        assert_eq!(offer.signing_pubkey(), pubkey(42));
+
+        assert_eq!(tlv_stream.chains, None);
+        assert_eq!(tlv_stream.metadata, None);
+        assert_eq!(tlv_stream.currency, None);
+        assert_eq!(tlv_stream.amount, None);
+        assert_eq!(tlv_stream.description, Some(&String::from("foo")));
+        assert_eq!(tlv_stream.features, None);
+        assert_eq!(tlv_stream.absolute_expiry, None);
+        assert_eq!(tlv_stream.paths, None);
+        assert_eq!(tlv_stream.issuer, None);
+        assert_eq!(tlv_stream.quantity_max, None);
+        assert_eq!(tlv_stream.node_id, Some(&pubkey(42)));
+
+        if let Err(e) = Offer::try_from(buffer) {
+            panic!("error parsing offer: {:?}", e);
+        }
+    }
+
+    #[test]
+    fn builds_offer_with_chains() {
+        let mainnet = ChainHash::using_genesis_block(Network::Bitcoin);
+        let testnet = ChainHash::using_genesis_block(Network::Testnet);
+
+        // A lone mainnet chain is implied and thus not serialized.
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .chain(Network::Bitcoin)
+            .build()
+            .unwrap();
+        assert_eq!(offer.chains(), vec![mainnet]);
+        assert_eq!(offer.as_tlv_stream().chains, None);
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .chain(Network::Testnet)
+            .build()
+            .unwrap();
+        assert_eq!(offer.chains(), vec![testnet]);
+        assert_eq!(offer.as_tlv_stream().chains, Some(&vec![testnet]));
+
+        // Duplicate chains are deduplicated by the builder.
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .chain(Network::Testnet)
+            .chain(Network::Testnet)
+            .build()
+            .unwrap();
+        assert_eq!(offer.chains(), vec![testnet]);
+        assert_eq!(offer.as_tlv_stream().chains, Some(&vec![testnet]));
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .chain(Network::Bitcoin)
+            .chain(Network::Testnet)
+            .build()
+            .unwrap();
+        assert_eq!(offer.chains(), vec![mainnet, testnet]);
+        assert_eq!(offer.as_tlv_stream().chains, Some(&vec![mainnet, testnet]));
+    }
+
+    #[test]
+    fn builds_offer_with_metadata() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .metadata(vec![42; 32])
+            .build()
+            .unwrap();
+        assert_eq!(offer.metadata(), Some(&vec![42; 32]));
+        assert_eq!(offer.as_tlv_stream().metadata, Some(&vec![42; 32]));
+
+        // Successive calls override the previous setting.
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .metadata(vec![42; 32])
+            .metadata(vec![43; 32])
+            .build()
+            .unwrap();
+        assert_eq!(offer.metadata(), Some(&vec![43; 32]));
+        assert_eq!(offer.as_tlv_stream().metadata, Some(&vec![43; 32]));
+    }
+
+    #[test]
+    fn builds_offer_with_amount() {
+        let bitcoin_amount = Amount::Bitcoin { amount_msats: 1000 };
+        let currency_amount = Amount::Currency { iso4217_code: *b"USD", amount: 10 };
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .amount_msats(1000)
+            .build()
+            .unwrap();
+        let tlv_stream = offer.as_tlv_stream();
+        assert_eq!(offer.amount(), Some(&bitcoin_amount));
+        assert_eq!(tlv_stream.amount, Some(1000));
+        assert_eq!(tlv_stream.currency, None);
+
+        // Currency-denominated offers serialize but cannot be built yet.
+        let builder = OfferBuilder::new("foo".into(), pubkey(42))
+            .amount(currency_amount.clone());
+        let tlv_stream = builder.offer.as_tlv_stream();
+        assert_eq!(builder.offer.amount, Some(currency_amount.clone()));
+        assert_eq!(tlv_stream.amount, Some(10));
+        assert_eq!(tlv_stream.currency, Some(b"USD"));
+        match builder.build() {
+            Ok(_) => panic!("expected error"),
+            Err(e) => assert_eq!(e, SemanticError::UnsupportedCurrency),
+        }
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .amount(currency_amount.clone())
+            .amount(bitcoin_amount.clone())
+            .build()
+            .unwrap();
+        let tlv_stream = offer.as_tlv_stream();
+        assert_eq!(tlv_stream.amount, Some(1000));
+        assert_eq!(tlv_stream.currency, None);
+
+        // Amounts above the total bitcoin supply are rejected.
+        let invalid_amount = Amount::Bitcoin { amount_msats: MAX_VALUE_MSAT + 1 };
+        match OfferBuilder::new("foo".into(), pubkey(42)).amount(invalid_amount).build() {
+            Ok(_) => panic!("expected error"),
+            Err(e) => assert_eq!(e, SemanticError::InvalidAmount),
+        }
+    }
+
+    #[test]
+    fn builds_offer_with_features() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .features(OfferFeatures::unknown())
+            .build()
+            .unwrap();
+        assert_eq!(offer.features(), &OfferFeatures::unknown());
+        assert_eq!(offer.as_tlv_stream().features, Some(&OfferFeatures::unknown()));
+
+        // Empty features are omitted from the TLV stream.
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .features(OfferFeatures::unknown())
+            .features(OfferFeatures::empty())
+            .build()
+            .unwrap();
+        assert_eq!(offer.features(), &OfferFeatures::empty());
+        assert_eq!(offer.as_tlv_stream().features, None);
+    }
+
+    #[test]
+    fn builds_offer_with_absolute_expiry() {
+        let future_expiry = Duration::from_secs(u64::max_value());
+        let past_expiry = Duration::from_secs(0);
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .absolute_expiry(future_expiry)
+            .build()
+            .unwrap();
+        #[cfg(feature = "std")]
+        assert!(!offer.is_expired());
+        assert_eq!(offer.absolute_expiry(), Some(future_expiry));
+        assert_eq!(offer.as_tlv_stream().absolute_expiry, Some(future_expiry.as_secs()));
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .absolute_expiry(future_expiry)
+            .absolute_expiry(past_expiry)
+            .build()
+            .unwrap();
+        #[cfg(feature = "std")]
+        assert!(offer.is_expired());
+        assert_eq!(offer.absolute_expiry(), Some(past_expiry));
+        assert_eq!(offer.as_tlv_stream().absolute_expiry, Some(past_expiry.as_secs()));
+    }
+
+    #[test]
+    fn builds_offer_with_paths() {
+        let paths = vec![
+            BlindedPath {
+                introduction_node_id: pubkey(40),
+                blinding_point: pubkey(41),
+                blinded_hops: vec![
+                    BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] },
+                    BlindedHop { blinded_node_id: pubkey(44), encrypted_payload: vec![0; 44] },
+                ],
+            },
+            BlindedPath {
+                introduction_node_id: pubkey(40),
+                blinding_point: pubkey(41),
+                blinded_hops: vec![
+                    BlindedHop { blinded_node_id: pubkey(45), encrypted_payload: vec![0; 45] },
+                    BlindedHop { blinded_node_id: pubkey(46), encrypted_payload: vec![0; 46] },
+                ],
+            },
+        ];
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .path(paths[0].clone())
+            .path(paths[1].clone())
+            .build()
+            .unwrap();
+        let tlv_stream = offer.as_tlv_stream();
+        assert_eq!(offer.paths(), paths.as_slice());
+        assert_eq!(offer.signing_pubkey(), pubkey(42));
+        assert_ne!(pubkey(42), pubkey(44));
+        assert_eq!(tlv_stream.paths, Some(&paths));
+        assert_eq!(tlv_stream.node_id, Some(&pubkey(42)));
+    }
+
+    #[test]
+    fn builds_offer_with_issuer() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .issuer("bar".into())
+            .build()
+            .unwrap();
+        assert_eq!(offer.issuer(), Some(PrintableString("bar")));
+        assert_eq!(offer.as_tlv_stream().issuer, Some(&String::from("bar")));
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .issuer("bar".into())
+            .issuer("baz".into())
+            .build()
+            .unwrap();
+        assert_eq!(offer.issuer(), Some(PrintableString("baz")));
+        assert_eq!(offer.as_tlv_stream().issuer, Some(&String::from("baz")));
+    }
+
+    #[test]
+    fn builds_offer_with_supported_quantity() {
+        let ten = NonZeroU64::new(10).unwrap();
+
+        // A quantity of one is encoded by omitting the record.
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .supported_quantity(Quantity::one())
+            .build()
+            .unwrap();
+        let tlv_stream = offer.as_tlv_stream();
+        assert_eq!(offer.supported_quantity(), Quantity::one());
+        assert_eq!(tlv_stream.quantity_max, None);
+
+        // Unbounded quantity is encoded as 0.
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .supported_quantity(Quantity::Unbounded)
+            .build()
+            .unwrap();
+        let tlv_stream = offer.as_tlv_stream();
+        assert_eq!(offer.supported_quantity(), Quantity::Unbounded);
+        assert_eq!(tlv_stream.quantity_max, Some(0));
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .supported_quantity(Quantity::Bounded(ten))
+            .build()
+            .unwrap();
+        let tlv_stream = offer.as_tlv_stream();
+        assert_eq!(offer.supported_quantity(), Quantity::Bounded(ten));
+        assert_eq!(tlv_stream.quantity_max, Some(10));
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .supported_quantity(Quantity::Bounded(ten))
+            .supported_quantity(Quantity::one())
+            .build()
+            .unwrap();
+        let tlv_stream = offer.as_tlv_stream();
+        assert_eq!(offer.supported_quantity(), Quantity::one());
+        assert_eq!(tlv_stream.quantity_max, None);
+    }
+
+    #[test]
+    fn parses_offer_with_chains() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .chain(Network::Bitcoin)
+            .chain(Network::Testnet)
+            .build()
+            .unwrap();
+        if let Err(e) = offer.to_string().parse::<Offer>() {
+            panic!("error parsing offer: {:?}", e);
+        }
+    }
+
+    #[test]
+    fn parses_offer_with_amount() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .amount(Amount::Bitcoin { amount_msats: 1000 })
+            .build()
+            .unwrap();
+        if let Err(e) = offer.to_string().parse::<Offer>() {
+            panic!("error parsing offer: {:?}", e);
+        }
+
+        // Currency-denominated offers parse successfully (building them is what's unsupported).
+        let mut tlv_stream = offer.as_tlv_stream();
+        tlv_stream.amount = Some(1000);
+        tlv_stream.currency = Some(b"USD");
+
+        let mut encoded_offer = Vec::new();
+        tlv_stream.write(&mut encoded_offer).unwrap();
+
+        if let Err(e) = Offer::try_from(encoded_offer) {
+            panic!("error parsing offer: {:?}", e);
+        }
+
+        // A currency without an amount is rejected.
+        let mut tlv_stream = offer.as_tlv_stream();
+        tlv_stream.amount = None;
+        tlv_stream.currency = Some(b"USD");
+
+        let mut encoded_offer = Vec::new();
+        tlv_stream.write(&mut encoded_offer).unwrap();
+
+        match Offer::try_from(encoded_offer) {
+            Ok(_) => panic!("expected error"),
+            Err(e) => assert_eq!(e, ParseError::InvalidSemantics(SemanticError::MissingAmount)),
+        }
+
+        // An amount exceeding the total bitcoin supply is rejected.
+        let mut tlv_stream = offer.as_tlv_stream();
+        tlv_stream.amount = Some(MAX_VALUE_MSAT + 1);
+        tlv_stream.currency = None;
+
+        let mut encoded_offer = Vec::new();
+        tlv_stream.write(&mut encoded_offer).unwrap();
+
+        match Offer::try_from(encoded_offer) {
+            Ok(_) => panic!("expected error"),
+            Err(e) => assert_eq!(e, ParseError::InvalidSemantics(SemanticError::InvalidAmount)),
+        }
+    }
+
+    #[test]
+    fn parses_offer_with_description() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42)).build().unwrap();
+        if let Err(e) = offer.to_string().parse::<Offer>() {
+            panic!("error parsing offer: {:?}", e);
+        }
+
+        let mut tlv_stream = offer.as_tlv_stream();
+        tlv_stream.description = None;
+
+        let mut encoded_offer = Vec::new();
+        tlv_stream.write(&mut encoded_offer).unwrap();
+
+        match Offer::try_from(encoded_offer) {
+            Ok(_) => panic!("expected error"),
+            Err(e) => {
+                assert_eq!(e, ParseError::InvalidSemantics(SemanticError::MissingDescription));
+            },
+        }
+    }
+
+    #[test]
+    fn parses_offer_with_paths() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .path(BlindedPath {
+                introduction_node_id: pubkey(40),
+                blinding_point: pubkey(41),
+                blinded_hops: vec![
+                    BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] },
+                    BlindedHop { blinded_node_id: pubkey(44), encrypted_payload: vec![0; 44] },
+                ],
+            })
+            .path(BlindedPath {
+                introduction_node_id: pubkey(40),
+                blinding_point: pubkey(41),
+                blinded_hops: vec![
+                    BlindedHop { blinded_node_id: pubkey(45), encrypted_payload: vec![0; 45] },
+                    BlindedHop { blinded_node_id: pubkey(46), encrypted_payload: vec![0; 46] },
+                ],
+            })
+            .build()
+            .unwrap();
+        if let Err(e) = offer.to_string().parse::<Offer>() {
+            panic!("error parsing offer: {:?}", e);
+        }
+
+        // An empty paths vector round-trips as well (set directly since the builder forbids it).
+        let mut builder = OfferBuilder::new("foo".into(), pubkey(42));
+        builder.offer.paths = Some(vec![]);
+
+        let offer = builder.build().unwrap();
+        if let Err(e) = offer.to_string().parse::<Offer>() {
+            panic!("error parsing offer: {:?}", e);
+        }
+    }
+
+    #[test]
+    fn parses_offer_with_quantity() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .supported_quantity(Quantity::one())
+            .build()
+            .unwrap();
+        if let Err(e) = offer.to_string().parse::<Offer>() {
+            panic!("error parsing offer: {:?}", e);
+        }
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .supported_quantity(Quantity::Unbounded)
+            .build()
+            .unwrap();
+        if let Err(e) = offer.to_string().parse::<Offer>() {
+            panic!("error parsing offer: {:?}", e);
+        }
+
+        let offer = OfferBuilder::new("foo".into(), pubkey(42))
+            .supported_quantity(Quantity::Bounded(NonZeroU64::new(10).unwrap()))
+            .build()
+            .unwrap();
+        if let Err(e) = offer.to_string().parse::<Offer>() {
+            panic!("error parsing offer: {:?}", e);
+        }
+
+        // An explicit quantity_max of 1 is invalid (one is encoded by omission).
+        let mut tlv_stream = offer.as_tlv_stream();
+        tlv_stream.quantity_max = Some(1);
+
+        let mut encoded_offer = Vec::new();
+        tlv_stream.write(&mut encoded_offer).unwrap();
+
+        match Offer::try_from(encoded_offer) {
+            Ok(_) => panic!("expected error"),
+            Err(e) => {
+                assert_eq!(e, ParseError::InvalidSemantics(SemanticError::InvalidQuantity));
+            },
+        }
+    }
+
+    #[test]
+    fn parses_offer_with_node_id() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42)).build().unwrap();
+        if let Err(e) = offer.to_string().parse::<Offer>() {
+            panic!("error parsing offer: {:?}", e);
+        }
+
+        // A missing node id is rejected at parse time (set directly since the builder requires one).
+        let mut builder = OfferBuilder::new("foo".into(), pubkey(42));
+        builder.offer.signing_pubkey = None;
+
+        let offer = builder.build().unwrap();
+        match offer.to_string().parse::<Offer>() {
+            Ok(_) => panic!("expected error"),
+            Err(e) => {
+                assert_eq!(e, ParseError::InvalidSemantics(SemanticError::MissingSigningPubkey));
+            },
+        }
+    }
+
+    #[test]
+    fn fails_parsing_offer_with_extra_tlv_records() {
+        let offer = OfferBuilder::new("foo".into(), pubkey(42)).build().unwrap();
+
+        // Append a record with type 80, outside the offer's 1..80 TLV range.
+        let mut encoded_offer = Vec::new();
+        offer.write(&mut encoded_offer).unwrap();
+        BigSize(80).write(&mut encoded_offer).unwrap();
+        BigSize(32).write(&mut encoded_offer).unwrap();
+        [42u8; 32].write(&mut encoded_offer).unwrap();
+
+        match Offer::try_from(encoded_offer) {
+            Ok(_) => panic!("expected error"),
+            Err(e) => assert_eq!(e, ParseError::Decode(DecodeError::InvalidValue)),
+        }
+    }
+}
+
+// Tests for the bech32 (checksum-less) encoding of offers, including '+' continuations.
+#[cfg(test)]
+mod bech32_tests {
+    use super::{Offer, ParseError};
+    use bitcoin::bech32;
+    use crate::ln::msgs::DecodeError;
+
+    // TODO: Remove once test vectors are updated.
+    #[ignore]
+    #[test]
+    fn encodes_offer_as_bech32_without_checksum() {
+        let encoded_offer = "lno1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy";
+        let offer = dbg!(encoded_offer.parse::<Offer>().unwrap());
+        let reencoded_offer = offer.to_string();
+        dbg!(reencoded_offer.parse::<Offer>().unwrap());
+        assert_eq!(reencoded_offer, encoded_offer);
+    }
+
+    // TODO: Remove once test vectors are updated.
+    #[ignore]
+    #[test]
+    fn parses_bech32_encoded_offers() {
+        let offers = [
+            // BOLT 12 test vectors
+            "lno1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy",
+            "l+no1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy",
+            "l+no1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy",
+            "lno1qcp4256ypqpq+86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn0+0fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0+sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qs+y",
+            "lno1qcp4256ypqpq+ 86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn0+ 0fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0+\nsqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43l+\r\nastpwuh73k29qs+\r y",
+            // Two blinded paths
+            "lno1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0yg06qg2qdd7t628sgykwj5kuc837qmlv9m9gr7sq8ap6erfgacv26nhp8zzcqgzhdvttlk22pw8fmwqqrvzst792mj35ypylj886ljkcmug03wg6heqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq6muh550qsfva9fdes0ruph7ctk2s8aqq06r4jxj3msc448wzwy9sqs9w6ckhlv55zuwnkuqqxc9qhu24h9rggzflyw04l9d3hcslzu340jqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy",
+        ];
+        for encoded_offer in &offers {
+            if let Err(e) = encoded_offer.parse::<Offer>() {
+                panic!("Invalid offer ({:?}): {}", e, encoded_offer);
+            }
+        }
+    }
+
+    #[test]
+    fn fails_parsing_bech32_encoded_offers_with_invalid_continuations() {
+        let offers = [
+            // BOLT 12 test vectors
+            "lno1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy+",
+            "lno1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy+ ",
+            "+lno1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy",
+            "+ lno1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy",
+            "ln++o1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy",
+        ];
+        for encoded_offer in &offers {
+            match encoded_offer.parse::<Offer>() {
+                Ok(_) => panic!("Valid offer: {}", encoded_offer),
+                Err(e) => assert_eq!(e, ParseError::InvalidContinuation),
+            }
+        }
+
+    }
+
+    #[test]
+    fn fails_parsing_bech32_encoded_offer_with_invalid_hrp() {
+        // `lni` is the invoice HRP, not the offer (`lno`) HRP.
+        let encoded_offer = "lni1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsy";
+        match encoded_offer.parse::<Offer>() {
+            Ok(_) => panic!("Valid offer: {}", encoded_offer),
+            Err(e) => assert_eq!(e, ParseError::InvalidBech32Hrp),
+        }
+    }
+
+    #[test]
+    fn fails_parsing_bech32_encoded_offer_with_invalid_bech32_data() {
+        // Ends in 'o', which is not in the bech32 character set.
+        let encoded_offer = "lno1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qso";
+        match encoded_offer.parse::<Offer>() {
+            Ok(_) => panic!("Valid offer: {}", encoded_offer),
+            Err(e) => assert_eq!(e, ParseError::Bech32(bech32::Error::InvalidChar('o'))),
+        }
+    }
+
+    #[test]
+    fn fails_parsing_bech32_encoded_offer_with_invalid_tlv_data() {
+        // Trailing data that does not form a complete TLV record.
+        let encoded_offer = "lno1qcp4256ypqpq86q2pucnq42ngssx2an9wfujqerp0y2pqun4wd68jtn00fkxzcnn9ehhyec6qgqsz83qfwdpl28qqmc78ymlvhmxcsywdk5wrjnj36jryg488qwlrnzyjczlqsp9nyu4phcg6dqhlhzgxagfu7zh3d9re0sqp9ts2yfugvnnm9gxkcnnnkdpa084a6t520h5zhkxsdnghvpukvd43lastpwuh73k29qsyqqqqq";
+        match encoded_offer.parse::<Offer>() {
+            Ok(_) => panic!("Valid offer: {}", encoded_offer),
+            Err(e) => assert_eq!(e, ParseError::Decode(DecodeError::InvalidValue)),
+        }
+    }
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Parsing and formatting for bech32 message encoding.
+
+use bitcoin::bech32;
+use bitcoin::bech32::{FromBase32, ToBase32};
+use core::convert::TryFrom;
+use core::fmt;
+use crate::io;
+use crate::ln::msgs::DecodeError;
+use crate::util::ser::SeekReadable;
+
+use crate::prelude::*;
+
+/// Indicates a message can be encoded using bech32.
+pub(crate) trait Bech32Encode: AsRef<[u8]> + TryFrom<Vec<u8>, Error=ParseError> {
+    /// Human readable part of the message's bech32 encoding.
+    const BECH32_HRP: &'static str;
+
+    /// Parses a bech32-encoded message into a TLV stream.
+    ///
+    /// Errors with [`ParseError::InvalidContinuation`] if a '+'-delimited chunk is empty or
+    /// contains interior whitespace, and with [`ParseError::InvalidBech32Hrp`] if the
+    /// human-readable part does not match [`Self::BECH32_HRP`].
+    fn from_bech32_str(s: &str) -> Result<Self, ParseError> {
+        // Offer encoding may be split by '+' followed by optional whitespace. `nth(1)` checks
+        // whether a second chunk exists, i.e., whether any '+' is present.
+        let encoded = match s.split('+').nth(1) {
+            Some(_) => {
+                // Each chunk must be non-empty; whitespace may only directly follow a '+'.
+                for chunk in s.split('+') {
+                    let chunk = chunk.trim_start();
+                    if chunk.is_empty() || chunk.contains(char::is_whitespace) {
+                        return Err(ParseError::InvalidContinuation);
+                    }
+                }
+
+                // Strip continuation characters and whitespace before decoding.
+                let s = s.chars().filter(|c| *c != '+' && !c.is_whitespace()).collect::<String>();
+                Bech32String::Owned(s)
+            },
+            None => Bech32String::Borrowed(s),
+        };
+
+        let (hrp, data) = bech32::decode_without_checksum(encoded.as_ref())?;
+
+        if hrp != Self::BECH32_HRP {
+            return Err(ParseError::InvalidBech32Hrp);
+        }
+
+        let data = Vec::<u8>::from_base32(&data)?;
+        Self::try_from(data)
+    }
+
+    /// Formats the message using bech32-encoding.
+    fn fmt_bech32_str(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        // BECH32_HRP is a valid constant, so encoding can only fail via the formatter itself.
+        bech32::encode_without_checksum_to_fmt(f, Self::BECH32_HRP, self.as_ref().to_base32())
+            .expect("HRP is invalid").unwrap();
+
+        Ok(())
+    }
+}
+
+// Used to avoid copying a bech32 string not containing the continuation character (+).
+enum Bech32String<'a> {
+    // The input contained no '+', so it can be decoded in place.
+    Borrowed(&'a str),
+    // The input was reassembled with '+' and whitespace removed.
+    Owned(String),
+}
+
+impl<'a> AsRef<str> for Bech32String<'a> {
+    fn as_ref(&self) -> &str {
+        match self {
+            Bech32String::Borrowed(s) => s,
+            Bech32String::Owned(s) => s,
+        }
+    }
+}
+
+/// A wrapper for reading a message as a TLV stream `T` from a byte sequence, while still
+/// maintaining ownership of the bytes for later use.
+pub(crate) struct ParsedMessage<T: SeekReadable> {
+    pub bytes: Vec<u8>,
+    pub tlv_stream: T,
+}
+
+impl<T: SeekReadable> TryFrom<Vec<u8>> for ParsedMessage<T> {
+    type Error = DecodeError;
+
+    fn try_from(bytes: Vec<u8>) -> Result<Self, Self::Error> {
+        let mut cursor = io::Cursor::new(bytes);
+        let tlv_stream: T = SeekReadable::read(&mut cursor)?;
+
+        // Ensure that there are no more TLV records left to parse.
+        if cursor.position() < cursor.get_ref().len() as u64 {
+            return Err(DecodeError::InvalidValue);
+        }
+
+        // Recover ownership of the bytes consumed through the cursor.
+        let bytes = cursor.into_inner();
+        Ok(Self { bytes, tlv_stream })
+    }
+}
+
+/// Error when parsing a bech32 encoded message using [`str::parse`].
+// NOTE(review): consider `#[non_exhaustive]` so adding variants later is not an API break.
+#[derive(Debug, PartialEq)]
+pub enum ParseError {
+    /// The bech32 encoding does not conform to the BOLT 12 requirements for continuing messages
+    /// across multiple parts (i.e., '+' followed by whitespace).
+    InvalidContinuation,
+    /// The bech32 encoding's human-readable part does not match what was expected for the message
+    /// being parsed.
+    InvalidBech32Hrp,
+    /// The string could not be bech32 decoded.
+    Bech32(bech32::Error),
+    /// The bech32 decoded string could not be decoded as the expected message type.
+    Decode(DecodeError),
+    /// The parsed message has invalid semantics.
+    InvalidSemantics(SemanticError),
+}
+
+/// Error when interpreting a TLV stream as a specific type.
+#[derive(Debug, PartialEq)]
+pub enum SemanticError {
+    /// An amount was expected but was missing.
+    MissingAmount,
+    /// The amount exceeded the total bitcoin supply.
+    InvalidAmount,
+    /// A currency was provided that is not supported.
+    UnsupportedCurrency,
+    /// A required description was not provided.
+    MissingDescription,
+    /// A signing pubkey was not provided.
+    MissingSigningPubkey,
+    /// An unsupported quantity was provided.
+    InvalidQuantity,
+}
+
+// Conversions so lower-level errors propagate through `?` as `ParseError`.
+impl From<bech32::Error> for ParseError {
+    fn from(error: bech32::Error) -> Self {
+        Self::Bech32(error)
+    }
+}
+
+impl From<DecodeError> for ParseError {
+    fn from(error: DecodeError) -> Self {
+        Self::Decode(error)
+    }
+}
+
+impl From<SemanticError> for ParseError {
+    fn from(error: SemanticError) -> Self {
+        Self::InvalidSemantics(error)
+    }
+}
/// Onion messages can be sent and received to blinded routes, which serve to hide the identity of
/// the recipient.
+#[derive(Clone, Debug, PartialEq)]
pub struct BlindedRoute {
/// To send to a blinded route, the sender first finds a route to the unblinded
/// `introduction_node_id`, which can unblind its [`encrypted_payload`] to find out the onion
/// message's next hop and forward it along.
///
/// [`encrypted_payload`]: BlindedHop::encrypted_payload
- pub(super) introduction_node_id: PublicKey,
+ pub(crate) introduction_node_id: PublicKey,
/// Used by the introduction node to decrypt its [`encrypted_payload`] to forward the onion
/// message.
///
/// [`encrypted_payload`]: BlindedHop::encrypted_payload
- pub(super) blinding_point: PublicKey,
+ pub(crate) blinding_point: PublicKey,
/// The hops composing the blinded route.
- pub(super) blinded_hops: Vec<BlindedHop>,
+ pub(crate) blinded_hops: Vec<BlindedHop>,
}
/// Used to construct the blinded hops portion of a blinded route. These hops cannot be identified
/// by outside observers and thus can be used to hide the identity of the recipient.
+#[derive(Clone, Debug, PartialEq)]
pub struct BlindedHop {
/// The blinded node id of this hop in a blinded route.
- pub(super) blinded_node_id: PublicKey,
+ pub(crate) blinded_node_id: PublicKey,
/// The encrypted payload intended for this hop in a blinded route.
// The node sending to this blinded route will later encode this payload into the onion packet for
// this hop.
- pub(super) encrypted_payload: Vec<u8>,
+ pub(crate) encrypted_payload: Vec<u8>,
}
impl BlindedRoute {
use crate::ln::features::InitFeatures;
use crate::ln::msgs::{self, DecodeError, OnionMessageHandler};
use super::{BlindedRoute, CustomOnionMessageContents, CustomOnionMessageHandler, Destination, OnionMessageContents, OnionMessenger, SendError};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
-use crate::util::ser::{ Writeable, Writer};
+use crate::util::ser::{Writeable, Writer};
use crate::util::test_utils;
use bitcoin::network::constants::Network;
struct MessengerNode {
keys_manager: Arc<test_utils::TestKeysInterface>,
- messenger: OnionMessenger<EnforcingSigner, Arc<test_utils::TestKeysInterface>, Arc<test_utils::TestLogger>, Arc<TestCustomMessageHandler>>,
+ messenger: OnionMessenger<Arc<test_utils::TestKeysInterface>, Arc<test_utils::TestLogger>, Arc<TestCustomMessageHandler>>,
logger: Arc<test_utils::TestLogger>,
}
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
-use crate::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
+use crate::chain::keysinterface::{KeysInterface, KeysManager, Recipient};
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs::{self, OnionMessageHandler};
use crate::ln::onion_utils;
///
/// [offers]: <https://github.com/lightning/bolts/pull/798>
/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
-pub struct OnionMessenger<Signer: Sign, K: Deref, L: Deref, CMH: Deref>
- where K::Target: KeysInterface<Signer = Signer>,
+pub struct OnionMessenger<K: Deref, L: Deref, CMH: Deref>
+ where K::Target: KeysInterface,
L::Target: Logger,
CMH:: Target: CustomOnionMessageHandler,
{
fn read_custom_message<R: io::Read>(&self, message_type: u64, buffer: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError>;
}
-impl<Signer: Sign, K: Deref, L: Deref, CMH: Deref> OnionMessenger<Signer, K, L, CMH>
- where K::Target: KeysInterface<Signer = Signer>,
+impl<K: Deref, L: Deref, CMH: Deref> OnionMessenger<K, L, CMH>
+ where K::Target: KeysInterface,
L::Target: Logger,
CMH::Target: CustomOnionMessageHandler,
{
false
}
-impl<Signer: Sign, K: Deref, L: Deref, CMH: Deref> OnionMessageHandler for OnionMessenger<Signer, K, L, CMH>
- where K::Target: KeysInterface<Signer = Signer>,
+impl<K: Deref, L: Deref, CMH: Deref> OnionMessageHandler for OnionMessenger<K, L, CMH>
+ where K::Target: KeysInterface,
L::Target: Logger,
CMH::Target: CustomOnionMessageHandler + Sized,
{
}
}
-impl<Signer: Sign, K: Deref, L: Deref, CMH: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L, CMH>
- where K::Target: KeysInterface<Signer = Signer>,
+impl<K: Deref, L: Deref, CMH: Deref> OnionMessageProvider for OnionMessenger<K, L, CMH>
+ where K::Target: KeysInterface,
L::Target: Logger,
CMH::Target: CustomOnionMessageHandler,
{
///
/// [`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
/// [`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
-pub type SimpleArcOnionMessenger<L> = OnionMessenger<InMemorySigner, Arc<KeysManager>, Arc<L>, IgnoringMessageHandler>;
+pub type SimpleArcOnionMessenger<L> = OnionMessenger<Arc<KeysManager>, Arc<L>, IgnoringMessageHandler>;
/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
/// [`SimpleRefPeerManager`]. See their docs for more details.
///
///
/// [`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
/// [`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
-pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<InMemorySigner, &'a KeysManager, &'b L, IgnoringMessageHandler>;
+pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<&'a KeysManager, &'b L, IgnoringMessageHandler>;
/// Construct onion packet payloads and keys for sending an onion message along the given
/// `unblinded_path` to the given `destination`.
mod functional_tests;
// Re-export structs so they can be imported with just the `onion_message::` module prefix.
-pub use self::blinded_route::{BlindedRoute, BlindedHop};
+pub use self::blinded_route::{BlindedRoute, BlindedRoute as BlindedPath, BlindedHop};
pub use self::messenger::{CustomOnionMessageContents, CustomOnionMessageHandler, Destination, OnionMessageContents, OnionMessenger, SendError, SimpleArcOnionMessenger, SimpleRefOnionMessenger};
pub(crate) use self::packet::Packet;
match &self.0 {
Payload::Forward(ForwardControlTlvs::Blinded(encrypted_bytes)) => {
encode_varint_length_prefixed_tlv!(w, {
- (4, encrypted_bytes, vec_type)
+ (4, *encrypted_bytes, vec_type)
})
},
Payload::Receive {
} => {
encode_varint_length_prefixed_tlv!(w, {
(2, reply_path, option),
- (4, encrypted_bytes, vec_type),
+ (4, *encrypted_bytes, vec_type),
(message.tlv_type(), message, required)
})
},
use crate::ln::msgs;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, MaybeReadable};
use crate::util::logger::{Logger, Level};
-use crate::util::events::{Event, EventHandler, MessageSendEvent, MessageSendEventsProvider};
+use crate::util::events::{MessageSendEvent, MessageSendEventsProvider};
use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
+use crate::util::string::PrintableString;
use crate::io;
use crate::io_extras::{copy, sink};
/// This network graph is then used for routing payments.
/// Provides interface to help with initial routing sync by
/// serving historical announcements.
-///
-/// Serves as an [`EventHandler`] for applying updates from [`Event::PaymentPathFailed`] to the
-/// [`NetworkGraph`].
pub struct P2PGossipSync<G: Deref<Target=NetworkGraph<L>>, C: Deref, L: Deref>
where C::Target: chain::Access, L::Target: Logger
{
}
}
-impl<L: Deref> EventHandler for NetworkGraph<L> where L::Target: Logger {
- fn handle_event(&self, event: &Event) {
- if let Event::PaymentPathFailed { network_update, .. } = event {
- if let Some(network_update) = network_update {
- match *network_update {
- NetworkUpdate::ChannelUpdateMessage { ref msg } => {
- let short_channel_id = msg.contents.short_channel_id;
- let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1);
- let status = if is_enabled { "enabled" } else { "disabled" };
- log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}.", short_channel_id, status);
- let _ = self.update_channel(msg);
- },
- NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => {
- let action = if is_permanent { "Removing" } else { "Disabling" };
- log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", action, short_channel_id);
- self.channel_failed(short_channel_id, is_permanent);
- },
- NetworkUpdate::NodeFailure { ref node_id, is_permanent } => {
- if is_permanent {
- log_debug!(self.logger,
- "Removed node graph entry for {} due to a payment failure.", log_pubkey!(node_id));
- self.node_failed_permanent(node_id);
- };
- },
- }
- }
+impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
+	/// Handles any network updates originating from [`Event`]s.
+	///
+	/// [`Event`]: crate::util::events::Event
+	pub fn handle_network_update(&self, network_update: &NetworkUpdate) {
+		match *network_update {
+			NetworkUpdate::ChannelUpdateMessage { ref msg } => {
+				let short_channel_id = msg.contents.short_channel_id;
+				// Bit 1 of the channel_update flags is the "disable" bit; a clear bit means
+				// the channel is enabled.
+				let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1);
+				let status = if is_enabled { "enabled" } else { "disabled" };
+				log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}.", short_channel_id, status);
+				// Failures to apply the update are intentionally ignored here.
+				let _ = self.update_channel(msg);
+			},
+			NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => {
+				let action = if is_permanent { "Removing" } else { "Disabling" };
+				log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", action, short_channel_id);
+				self.channel_failed(short_channel_id, is_permanent);
+			},
+			NetworkUpdate::NodeFailure { ref node_id, is_permanent } => {
+				// Only permanent node failures mutate the graph; transient failures are
+				// logged-and-dropped (no graph entry is touched).
+				if is_permanent {
+					log_debug!(self.logger,
+						"Removed node graph entry for {} due to a payment failure.", log_pubkey!(node_id));
+					self.node_failed_permanent(node_id);
+				};
+			},
		}
	}
}
impl fmt::Display for NodeAlias {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
- let control_symbol = core::char::REPLACEMENT_CHARACTER;
let first_null = self.0.iter().position(|b| *b == 0).unwrap_or(self.0.len());
let bytes = self.0.split_at(first_null).0;
match core::str::from_utf8(bytes) {
- Ok(alias) => {
- for c in alias.chars() {
- let mut bytes = [0u8; 4];
- let c = if !c.is_control() { c } else { control_symbol };
- f.write_str(c.encode_utf8(&mut bytes))?;
- }
- },
+ Ok(alias) => PrintableString(alias).fmt(f)?,
Err(_) => {
+ use core::fmt::Write;
for c in bytes.iter().map(|b| *b as char) {
// Display printable ASCII characters
- let mut bytes = [0u8; 4];
+ let control_symbol = core::char::REPLACEMENT_CHARACTER;
let c = if c >= '\x20' && c <= '\x7e' { c } else { control_symbol };
- f.write_str(c.encode_utf8(&mut bytes))?;
+ f.write_char(c)?;
}
},
};
use crate::chain;
use crate::ln::channelmanager;
use crate::ln::chan_utils::make_funding_redeemscript;
- use crate::ln::PaymentHash;
use crate::ln::features::InitFeatures;
use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo};
use crate::ln::msgs::{RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
use crate::util::test_utils;
use crate::util::ser::{ReadableArgs, Writeable};
- use crate::util::events::{Event, EventHandler, MessageSendEvent, MessageSendEventsProvider};
+ use crate::util::events::{MessageSendEvent, MessageSendEventsProvider};
use crate::util::scid_utils::scid_from_parts;
use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS;
let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none());
- network_graph.handle_event(&Event::PaymentPathFailed {
- payment_id: None,
- payment_hash: PaymentHash([0; 32]),
- payment_failed_permanently: false,
- all_paths_failed: true,
- path: vec![],
- network_update: Some(NetworkUpdate::ChannelUpdateMessage {
- msg: valid_channel_update,
- }),
- short_channel_id: None,
- retry: None,
- error_code: None,
- error_data: None,
+ network_graph.handle_network_update(&NetworkUpdate::ChannelUpdateMessage {
+ msg: valid_channel_update,
});
assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some());
}
};
- network_graph.handle_event(&Event::PaymentPathFailed {
- payment_id: None,
- payment_hash: PaymentHash([0; 32]),
- payment_failed_permanently: false,
- all_paths_failed: true,
- path: vec![],
- network_update: Some(NetworkUpdate::ChannelFailure {
- short_channel_id,
- is_permanent: false,
- }),
- short_channel_id: None,
- retry: None,
- error_code: None,
- error_data: None,
+ network_graph.handle_network_update(&NetworkUpdate::ChannelFailure {
+ short_channel_id,
+ is_permanent: false,
});
match network_graph.read_only().channels().get(&short_channel_id) {
}
// Permanent closing deletes a channel
- network_graph.handle_event(&Event::PaymentPathFailed {
- payment_id: None,
- payment_hash: PaymentHash([0; 32]),
- payment_failed_permanently: false,
- all_paths_failed: true,
- path: vec![],
- network_update: Some(NetworkUpdate::ChannelFailure {
- short_channel_id,
- is_permanent: true,
- }),
- short_channel_id: None,
- retry: None,
- error_code: None,
- error_data: None,
+ network_graph.handle_network_update(&NetworkUpdate::ChannelFailure {
+ short_channel_id,
+ is_permanent: true,
});
assert_eq!(network_graph.read_only().channels().len(), 0);
assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
// Non-permanent node failure does not delete any nodes or channels
- network_graph.handle_event(&Event::PaymentPathFailed {
- payment_id: None,
- payment_hash: PaymentHash([0; 32]),
- payment_failed_permanently: false,
- all_paths_failed: true,
- path: vec![],
- network_update: Some(NetworkUpdate::NodeFailure {
- node_id: node_2_id,
- is_permanent: false,
- }),
- short_channel_id: None,
- retry: None,
- error_code: None,
- error_data: None,
+ network_graph.handle_network_update(&NetworkUpdate::NodeFailure {
+ node_id: node_2_id,
+ is_permanent: false,
});
assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
assert!(network_graph.read_only().nodes().get(&NodeId::from_pubkey(&node_2_id)).is_some());
// Permanent node failure deletes node and its channels
- network_graph.handle_event(&Event::PaymentPathFailed {
- payment_id: None,
- payment_hash: PaymentHash([0; 32]),
- payment_failed_permanently: false,
- all_paths_failed: true,
- path: vec![],
- network_update: Some(NetworkUpdate::NodeFailure {
- node_id: node_2_id,
- is_permanent: true,
- }),
- short_channel_id: None,
- retry: None,
- error_code: None,
- error_data: None,
+ network_graph.handle_network_update(&NetworkUpdate::NodeFailure {
+ node_id: node_2_id,
+ is_permanent: true,
});
assert_eq!(network_graph.read_only().nodes().len(), 0);
) -> Result<Route, LightningError>;
}
-/// A map with liquidity value (in msat) keyed by a short channel id and the direction the HTLC
-/// is traveling in. The direction boolean is determined by checking if the HTLC source's public
-/// key is less than its destination. See [`InFlightHtlcs::used_liquidity_msat`] for more
-/// details.
-#[cfg(not(any(test, feature = "_test_utils")))]
-pub struct InFlightHtlcs(HashMap<(u64, bool), u64>);
-#[cfg(any(test, feature = "_test_utils"))]
-pub struct InFlightHtlcs(pub HashMap<(u64, bool), u64>);
+/// A data structure for tracking in-flight HTLCs. May be used during pathfinding to account for
+/// in-use channel liquidity.
+///
+/// Populate it by calling [`InFlightHtlcs::process_path`] for each path with pending HTLCs.
+pub struct InFlightHtlcs(
+	// A map with liquidity value (in msat) keyed by a short channel id and the direction the HTLC
+	// is traveling in. The direction boolean is determined by checking if the HTLC source's public
+	// key is less than its destination. See `InFlightHtlcs::used_liquidity_msat` for more
+	// details.
+	HashMap<(u64, bool), u64>
+);
impl InFlightHtlcs {
- /// Create a new `InFlightHtlcs` via a mapping from:
- /// (short_channel_id, source_pubkey < target_pubkey) -> used_liquidity_msat
- pub fn new(inflight_map: HashMap<(u64, bool), u64>) -> Self {
- InFlightHtlcs(inflight_map)
+	/// Constructs an empty `InFlightHtlcs` with no channel liquidity tracked as in use.
+	pub fn new() -> Self { InFlightHtlcs(HashMap::new()) }
+
+	/// Takes in a path with payer's node id and adds the path's details to `InFlightHtlcs`.
+	///
+	/// Each hop's channel accumulates the cumulative amount sent over it (the hop's value plus
+	/// downstream fees), keyed by `(short_channel_id, source_node_id < target_node_id)`.
+	pub fn process_path(&mut self, path: &[RouteHop], payer_node_id: PublicKey) {
+		if path.is_empty() { return };
+		// total_inflight_map needs to be direction-sensitive when keeping track of the HTLC value
+		// that is held up. However, the `hops` array, which is a path returned by `find_route` in
+		// the router excludes the payer node. In the following lines, the payer's information is
+		// hardcoded with an inflight value of 0 so that we can correctly represent the first hop
+		// in our sliding window of two.
+		let reversed_hops_with_payer = path.iter().rev().skip(1)
+			.map(|hop| hop.pubkey)
+			.chain(core::iter::once(payer_node_id));
+		let mut cumulative_msat = 0;
+
+		// Taking the reversed vector from above, we zip it with just the reversed hops list to
+		// work "backwards" of the given path, since the last hop's `fee_msat` actually represents
+		// the total amount sent.
+		for (next_hop, prev_hop) in path.iter().rev().zip(reversed_hops_with_payer) {
+			cumulative_msat += next_hop.fee_msat;
+			self.0
+				.entry((next_hop.short_channel_id, NodeId::from_pubkey(&prev_hop) < NodeId::from_pubkey(&next_hop.pubkey)))
+				.and_modify(|used_liquidity_msat| *used_liquidity_msat += cumulative_msat)
+				.or_insert(cumulative_msat);
+		}
+	}
/// Returns liquidity in msat given the public key of the HTLC source, target, and short channel
const MEDIAN_HOP_CLTV_EXPIRY_DELTA: u32 = 40;
// During routing, we only consider paths shorter than our maximum length estimate.
-// In the legacy onion format, the maximum number of hops used to be a fixed value of 20.
-// However, in the TLV onion format, there is no fixed maximum length, but the `hop_payloads`
+// In the TLV onion format, there is no fixed maximum length, but the `hop_payloads`
// field is always 1300 bytes. As the `tlv_payload` for each hop may vary in length, we have to
// estimate how many hops the route may have so that it actually fits the `hop_payloads` field.
//
inbound_capacity_msat: 42,
unspendable_punishment_reserve: None,
confirmations_required: None,
+ confirmations: None,
force_close_spend_delay: None,
is_outbound: true, is_channel_ready: true,
is_usable: true, is_public: true,
inbound_capacity_msat: 0,
unspendable_punishment_reserve: None,
confirmations_required: None,
+ confirmations: None,
force_close_spend_delay: None,
is_outbound: true,
is_channel_ready: true,
for (this_htlc, sig) in trusted_tx.htlcs().iter().zip(&commitment_tx.counterparty_htlc_sigs) {
assert!(this_htlc.transaction_output_index.is_some());
let keys = trusted_tx.keys();
- let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, trusted_tx.feerate_per_kw(), holder_csv, &this_htlc, self.opt_anchors(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, trusted_tx.feerate_per_kw(), holder_csv, &this_htlc, self.opt_anchors(), false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&this_htlc, self.opt_anchors(), &keys);
use crate::ln::msgs::DecodeError;
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::routing::gossip::NetworkUpdate;
-use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper, OptionDeserWrapper};
+use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, WithoutLength, OptionDeserWrapper};
use crate::routing::router::{RouteHop, RouteParameters};
use bitcoin::{PackedLockTime, Transaction};
channel_value_satoshis: u64,
/// The script which should be used in the transaction output.
output_script: Script,
- /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`], or 0 for
- /// an inbound channel.
+ /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`], or a
+ /// random value for an inbound channel. This may be zero for objects serialized with LDK
+ /// versions prior to 0.0.113.
///
/// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
- user_channel_id: u64,
+ user_channel_id: u128,
},
/// Indicates we've received (an offer of) money! Just gotta dig out that payment preimage and
/// feed it to [`ChannelManager::claim_funds`] to get it....
/// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
/// [`ChannelManager::fail_htlc_backwards`]: crate::ln::channelmanager::ChannelManager::fail_htlc_backwards
PaymentReceived {
+ /// The node that received the payment.
+ /// This is useful to identify payments which were received via [phantom node payments].
+ /// This field will always be filled in when the event was generated by LDK versions
+ /// 0.0.113 and above.
+ ///
+ /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+ receiver_node_id: Option<PublicKey>,
/// The hash for which the preimage should be handed to the ChannelManager. Note that LDK will
/// not stop you from registering duplicate payment hashes for inbound payments.
payment_hash: PaymentHash,
/// Information for claiming this received payment, based on whether the purpose of the
/// payment is to pay an invoice or to send a spontaneous payment.
purpose: PaymentPurpose,
+ /// The `channel_id` indicating over which channel we received the payment.
+ via_channel_id: Option<[u8; 32]>,
+ /// The `user_channel_id` indicating over which channel we received the payment.
+ via_user_channel_id: Option<u128>,
},
/// Indicates a payment has been claimed and we've received money!
///
///
/// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
PaymentClaimed {
+ /// The node that received the payment.
+ /// This is useful to identify payments which were received via [phantom node payments].
+ /// This field will always be filled in when the event was generated by LDK versions
+ /// 0.0.113 and above.
+ ///
+ /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+ receiver_node_id: Option<PublicKey>,
/// The payment hash of the claimed payment. Note that LDK will not stop you from
/// registering duplicate payment hashes for inbound payments.
payment_hash: PaymentHash,
/// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
/// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
/// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be 0 for an inbound channel.
+ /// `user_channel_id` will be randomized for an inbound channel.
///
/// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
/// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
- user_channel_id: u64,
+ user_channel_id: u128,
/// The node_id of the channel counterparty.
counterparty_node_id: PublicKey,
/// The features that this channel will operate with.
/// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
/// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
/// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be 0 for an inbound channel.
- /// This will always be zero for objects serialized with LDK versions prior to 0.0.102.
+ /// `user_channel_id` will be randomized for inbound channels.
+ /// This may be zero for inbound channels serialized prior to 0.0.113 and will always be
+ /// zero for objects serialized with LDK versions prior to 0.0.102.
///
/// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
/// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
- user_channel_id: u64,
+ user_channel_id: u128,
/// The reason the channel was closed.
reason: ClosureReason
},
// We never write out FundingGenerationReady events as, upon disconnection, peers
// drop any channels which have not yet exchanged funding_signed.
},
- &Event::PaymentReceived { ref payment_hash, ref amount_msat, ref purpose } => {
+ &Event::PaymentReceived { ref payment_hash, ref amount_msat, ref purpose, ref receiver_node_id, ref via_channel_id, ref via_user_channel_id } => {
1u8.write(writer)?;
let mut payment_secret = None;
let payment_preimage;
}
write_tlv_fields!(writer, {
(0, payment_hash, required),
+ (1, receiver_node_id, option),
(2, payment_secret, option),
+ (3, via_channel_id, option),
(4, amount_msat, required),
+ (5, via_user_channel_id, option),
(6, 0u64, required), // user_payment_id required for compatibility with 0.0.103 and earlier
(8, payment_preimage, option),
});
(1, network_update, option),
(2, payment_failed_permanently, required),
(3, all_paths_failed, required),
- (5, path, vec_type),
+ (5, *path, vec_type),
(7, short_channel_id, option),
(9, retry, option),
(11, payment_id, option),
&Event::SpendableOutputs { ref outputs } => {
5u8.write(writer)?;
write_tlv_fields!(writer, {
- (0, VecWriteWrapper(outputs), required),
+ (0, WithoutLength(outputs), required),
});
},
&Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
},
&Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason } => {
9u8.write(writer)?;
+ // `user_channel_id` used to be a single u64 value. In order to remain backwards
+ // compatible with versions prior to 0.0.113, the u128 is serialized as two
+ // separate u64 values.
+ let user_channel_id_low = *user_channel_id as u64;
+ let user_channel_id_high = (*user_channel_id >> 64) as u64;
write_tlv_fields!(writer, {
(0, channel_id, required),
- (1, user_channel_id, required),
- (2, reason, required)
+ (1, user_channel_id_low, required),
+ (2, reason, required),
+ (3, user_channel_id_high, required),
});
},
&Event::DiscardFunding { ref channel_id, ref transaction } => {
write_tlv_fields!(writer, {
(0, payment_id, required),
(2, payment_hash, option),
- (4, path, vec_type)
+ (4, *path, vec_type)
})
},
&Event::PaymentFailed { ref payment_id, ref payment_hash } => {
// We never write the OpenChannelRequest events as, upon disconnection, peers
// drop any channels which have not yet exchanged funding_signed.
},
- &Event::PaymentClaimed { ref payment_hash, ref amount_msat, ref purpose } => {
+ &Event::PaymentClaimed { ref payment_hash, ref amount_msat, ref purpose, ref receiver_node_id } => {
19u8.write(writer)?;
write_tlv_fields!(writer, {
(0, payment_hash, required),
+ (1, receiver_node_id, option),
(2, purpose, required),
(4, amount_msat, required),
});
write_tlv_fields!(writer, {
(0, payment_id, required),
(2, payment_hash, required),
- (4, path, vec_type)
+ (4, *path, vec_type)
})
},
&Event::ProbeFailed { ref payment_id, ref payment_hash, ref path, ref short_channel_id } => {
write_tlv_fields!(writer, {
(0, payment_id, required),
(2, payment_hash, required),
- (4, path, vec_type),
+ (4, *path, vec_type),
(6, short_channel_id, option),
})
},
let mut payment_preimage = None;
let mut payment_secret = None;
let mut amount_msat = 0;
+ let mut receiver_node_id = None;
let mut _user_payment_id = None::<u64>; // For compatibility with 0.0.103 and earlier
+ let mut via_channel_id = None;
+ let mut via_user_channel_id = None;
read_tlv_fields!(reader, {
(0, payment_hash, required),
+ (1, receiver_node_id, option),
(2, payment_secret, option),
+ (3, via_channel_id, option),
(4, amount_msat, required),
+ (5, via_user_channel_id, option),
(6, _user_payment_id, option),
(8, payment_preimage, option),
});
None => return Err(msgs::DecodeError::InvalidValue),
};
Ok(Some(Event::PaymentReceived {
+ receiver_node_id,
payment_hash,
amount_msat,
purpose,
+ via_channel_id,
+ via_user_channel_id,
}))
};
f()
4u8 => Ok(None),
5u8 => {
let f = || {
- let mut outputs = VecReadWrapper(Vec::new());
+ let mut outputs = WithoutLength(Vec::new());
read_tlv_fields!(reader, {
(0, outputs, required),
});
let f = || {
let mut channel_id = [0; 32];
let mut reason = None;
- let mut user_channel_id_opt = None;
+ let mut user_channel_id_low_opt: Option<u64> = None;
+ let mut user_channel_id_high_opt: Option<u64> = None;
read_tlv_fields!(reader, {
(0, channel_id, required),
- (1, user_channel_id_opt, option),
+ (1, user_channel_id_low_opt, option),
(2, reason, ignorable),
+ (3, user_channel_id_high_opt, option),
});
if reason.is_none() { return Ok(None); }
- let user_channel_id = if let Some(id) = user_channel_id_opt { id } else { 0 };
+
+ // `user_channel_id` used to be a single u64 value. In order to remain
+ // backwards compatible with versions prior to 0.0.113, the u128 is serialized
+ // as two separate u64 values.
+ let user_channel_id = (user_channel_id_low_opt.unwrap_or(0) as u128) +
+ ((user_channel_id_high_opt.unwrap_or(0) as u128) << 64);
+
Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: reason.unwrap() }))
};
f()
let mut payment_hash = PaymentHash([0; 32]);
let mut purpose = None;
let mut amount_msat = 0;
+ let mut receiver_node_id = None;
read_tlv_fields!(reader, {
(0, payment_hash, required),
+ (1, receiver_node_id, option),
(2, purpose, ignorable),
(4, amount_msat, required),
});
if purpose.is_none() { return Ok(None); }
Ok(Some(Event::PaymentClaimed {
+ receiver_node_id,
payment_hash,
purpose: purpose.unwrap(),
amount_msat,
29u8 => {
let f = || {
let mut channel_id = [0; 32];
- let mut user_channel_id: u64 = 0;
+ let mut user_channel_id: u128 = 0;
let mut counterparty_node_id = OptionDeserWrapper(None);
let mut channel_type = OptionDeserWrapper(None);
read_tlv_fields!(reader, {
///
/// Events are processed by passing an [`EventHandler`] to [`process_pending_events`].
///
+/// Implementations of this trait may also feature an async version of event handling, as shown with
+/// [`ChannelManager::process_pending_events_async`] and
+/// [`ChainMonitor::process_pending_events_async`].
+///
/// # Requirements
///
/// When using this trait, [`process_pending_events`] will call [`handle_event`] for each pending
/// [`handle_event`]: EventHandler::handle_event
/// [`ChannelManager::process_pending_events`]: crate::ln::channelmanager::ChannelManager#method.process_pending_events
/// [`ChainMonitor::process_pending_events`]: crate::chain::chainmonitor::ChainMonitor#method.process_pending_events
+/// [`ChannelManager::process_pending_events_async`]: crate::ln::channelmanager::ChannelManager::process_pending_events_async
+/// [`ChainMonitor::process_pending_events_async`]: crate::chain::chainmonitor::ChainMonitor::process_pending_events_async
pub trait EventsProvider {
/// Processes any events generated since the last call using the given event handler.
///
}
/// A trait implemented for objects handling events from [`EventsProvider`].
+///
+/// An async variation also exists for implementations of [`EventsProvider`] that support async
+/// event handling. The async event handler should satisfy the generic bounds: `F:
+/// core::future::Future, H: Fn(Event) -> F`.
pub trait EventHandler {
/// Handles the given [`Event`].
///
/// See [`EventsProvider`] for details that must be considered when implementing this method.
- fn handle_event(&self, event: &Event);
+ fn handle_event(&self, event: Event);
}
-impl<F> EventHandler for F where F: Fn(&Event) {
- fn handle_event(&self, event: &Event) {
+impl<F> EventHandler for F where F: Fn(Event) {
+ fn handle_event(&self, event: Event) {
self(event)
}
}
impl<T: EventHandler> EventHandler for Arc<T> {
- fn handle_event(&self, event: &Event) {
+ fn handle_event(&self, event: Event) {
self.deref().handle_event(event)
}
}
pub mod message_signing;
pub mod invoice;
pub mod persist;
+pub mod string;
pub mod wakers;
pub(crate) mod atomic_counter;
}
/// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
-pub trait Persister<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref, S: WriteableScore<'a>>
- where M::Target: 'static + chain::Watch<Signer>,
+pub trait Persister<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref, S: WriteableScore<'a>>
+ where M::Target: 'static + chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: 'static + BroadcasterInterface,
- K::Target: 'static + KeysInterface<Signer = Signer>,
+ K::Target: 'static + KeysInterface,
F::Target: 'static + FeeEstimator,
L::Target: 'static + Logger,
{
fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
}
-impl<'a, A: KVStorePersister, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, Signer, M, T, K, F, L, S> for A
- where M::Target: 'static + chain::Watch<Signer>,
+impl<'a, A: KVStorePersister, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, K, F, L, S> for A
+ where M::Target: 'static + chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: 'static + BroadcasterInterface,
- K::Target: 'static + KeysInterface<Signer = Signer>,
+ K::Target: 'static + KeysInterface,
F::Target: 'static + FeeEstimator,
L::Target: 'static + Logger,
{
pub(crate) mod fake_scid {
use bitcoin::hash_types::BlockHash;
use bitcoin::hashes::hex::FromHex;
- use crate::chain::keysinterface::{Sign, KeysInterface};
+ use crate::chain::keysinterface::KeysInterface;
use crate::util::chacha20::ChaCha20;
use crate::util::scid_utils;
use core::convert::TryInto;
use core::ops::Deref;
- const TEST_SEGWIT_ACTIVATION_HEIGHT: u32 = 0;
+ const TEST_SEGWIT_ACTIVATION_HEIGHT: u32 = 1;
const MAINNET_SEGWIT_ACTIVATION_HEIGHT: u32 = 481_824;
const MAX_TX_INDEX: u32 = 2_500;
const MAX_NAMESPACES: u8 = 8; // We allocate 3 bits for the namespace identifier.
/// between segwit activation and the current best known height, and the tx index and output
/// index are also selected from a "reasonable" range. We add this logic because it makes it
/// non-obvious at a glance that the scid is fake, e.g. if it appears in invoice route hints.
- pub(crate) fn get_fake_scid<Signer: Sign, K: Deref>(&self, highest_seen_blockheight: u32, genesis_hash: &BlockHash, fake_scid_rand_bytes: &[u8; 32], keys_manager: &K) -> u64
- where K::Target: KeysInterface<Signer = Signer>,
+ pub(crate) fn get_fake_scid<K: Deref>(&self, highest_seen_blockheight: u32, genesis_hash: &BlockHash, fake_scid_rand_bytes: &[u8; 32], keys_manager: &K) -> u64
+ where K::Target: KeysInterface,
{
// Ensure we haven't created a namespace that doesn't fit into the 3 bits we've allocated for
// namespaces.
}
/// Returns whether the given fake scid falls into the given namespace.
- pub fn is_valid_phantom(fake_scid_rand_bytes: &[u8; 32], scid: u64) -> bool {
+ pub fn is_valid_phantom(fake_scid_rand_bytes: &[u8; 32], scid: u64, genesis_hash: &BlockHash) -> bool {
let block_height = scid_utils::block_from_scid(&scid);
let tx_index = scid_utils::tx_index_from_scid(&scid);
let namespace = Namespace::Phantom;
let valid_vout = namespace.get_encrypted_vout(block_height, tx_index, fake_scid_rand_bytes);
- valid_vout == scid_utils::vout_from_scid(&scid) as u8
+ block_height >= segwit_activation_height(genesis_hash)
+ && valid_vout == scid_utils::vout_from_scid(&scid) as u8
}
#[cfg(test)]
fn test_is_valid_phantom() {
let namespace = Namespace::Phantom;
let fake_scid_rand_bytes = [0; 32];
+ let testnet_genesis = genesis_block(Network::Testnet).header.block_hash();
let valid_encrypted_vout = namespace.get_encrypted_vout(0, 0, &fake_scid_rand_bytes);
- let valid_fake_scid = scid_utils::scid_from_parts(0, 0, valid_encrypted_vout as u64).unwrap();
- assert!(is_valid_phantom(&fake_scid_rand_bytes, valid_fake_scid));
- let invalid_fake_scid = scid_utils::scid_from_parts(0, 0, 12).unwrap();
- assert!(!is_valid_phantom(&fake_scid_rand_bytes, invalid_fake_scid));
+ let valid_fake_scid = scid_utils::scid_from_parts(1, 0, valid_encrypted_vout as u64).unwrap();
+ assert!(is_valid_phantom(&fake_scid_rand_bytes, valid_fake_scid, &testnet_genesis));
+ let invalid_fake_scid = scid_utils::scid_from_parts(1, 0, 12).unwrap();
+ assert!(!is_valid_phantom(&fake_scid_rand_bytes, invalid_fake_scid, &testnet_genesis));
}
#[test]
//! as ChannelsManagers and ChannelMonitors.
use crate::prelude::*;
-use crate::io::{self, Read, Write};
+use crate::io::{self, Read, Seek, Write};
use crate::io_extras::{copy, sink};
use core::hash::Hash;
use crate::sync::Mutex;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::constants::{PUBLIC_KEY_SIZE, SECRET_KEY_SIZE, COMPACT_SIGNATURE_SIZE};
use bitcoin::secp256k1::ecdsa::Signature;
+use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::Script;
use bitcoin::blockdata::transaction::{OutPoint, Transaction, TxOut};
use bitcoin::consensus;
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError>;
}
+/// A trait that various rust-lightning types implement allowing them to be read in from a
+/// `Read + Seek`.
+///
+/// Unlike [`Readable`], the `Seek` bound allows implementations to rewind the stream, e.g. to
+/// un-read a TLV type id that falls outside the range they handle (see the `tlv_stream!`
+/// macro's `rewind` closure).
+pub(crate) trait SeekReadable where Self: Sized {
+ /// Reads a Self in from the given Read
+ fn read<R: Read + Seek>(reader: &mut R) -> Result<Self, DecodeError>;
+}
+
/// A trait that various higher-level rust-lightning types implement allowing them to be read in
/// from a Read given some additional set of arguments which is required to deserialize.
///
fn from(t: T) -> OptionDeserWrapper<T> { OptionDeserWrapper(Some(t)) }
}
-/// Wrapper to write each element of a Vec with no length prefix
-pub(crate) struct VecWriteWrapper<'a, T: Writeable>(pub &'a Vec<T>);
-impl<'a, T: Writeable> Writeable for VecWriteWrapper<'a, T> {
- #[inline]
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- for ref v in self.0.iter() {
- v.write(writer)?;
- }
- Ok(())
- }
-}
-
-/// Wrapper to read elements from a given stream until it reaches the end of the stream.
-pub(crate) struct VecReadWrapper<T>(pub Vec<T>);
-impl<T: MaybeReadable> Readable for VecReadWrapper<T> {
- #[inline]
- fn read<R: Read>(mut reader: &mut R) -> Result<Self, DecodeError> {
- let mut values = Vec::new();
- loop {
- let mut track_read = ReadTrackingReader::new(&mut reader);
- match MaybeReadable::read(&mut track_read) {
- Ok(Some(v)) => { values.push(v); },
- Ok(None) => { },
- // If we failed to read any bytes at all, we reached the end of our TLV
- // stream and have simply exhausted all entries.
- Err(ref e) if e == &DecodeError::ShortRead && !track_read.have_read => break,
- Err(e) => return Err(e),
- }
- }
- Ok(Self(values))
- }
-}
-
pub(crate) struct U48(pub u64);
impl Writeable for U48 {
#[inline]
}
}
}
+ impl From<$val_type> for HighZeroBytesDroppedBigSize<$val_type> {
+ fn from(val: $val_type) -> Self { Self(val) }
+ }
}
}
+impl_writeable_primitive!(u128, 16);
impl_writeable_primitive!(u64, 8);
impl_writeable_primitive!(u32, 4);
impl_writeable_primitive!(u16, 2);
);
}
-impl_array!(3); // for rgb
+impl_array!(3); // for rgb, ISO 4217 currency code
impl_array!(4); // for IPv4
impl_array!(12); // for OnionV2
impl_array!(16); // for IPv6
}
}
+/// For variable-length values within TLV record where the length is encoded as part of the record.
+/// Used to prevent encoding the length twice.
+pub(crate) struct WithoutLength<T>(pub T);
+
+// Writes the raw UTF-8 bytes of the string with no length prefix; the enclosing TLV record's
+// length implicitly delimits the value.
+impl Writeable for WithoutLength<&String> {
+ #[inline]
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ w.write_all(self.0.as_bytes())
+ }
+}
+// Reads bytes until the end of the stream (via the `WithoutLength<Vec<u8>>` impl) and validates
+// them as UTF-8, mapping invalid sequences to `DecodeError::InvalidValue`.
+impl Readable for WithoutLength<String> {
+ #[inline]
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let v: WithoutLength<Vec<u8>> = Readable::read(r)?;
+ Ok(Self(String::from_utf8(v.0).map_err(|_| DecodeError::InvalidValue)?))
+ }
+}
+// Convenience conversion so a borrowed `String` can be wrapped without naming the type.
+impl<'a> From<&'a String> for WithoutLength<&'a String> {
+ fn from(s: &'a String) -> Self { Self(s) }
+}
+
+// Writes each element back-to-back with no element-count prefix; the enclosing TLV record's
+// length implicitly delimits the collection.
+impl<'a, T: Writeable> Writeable for WithoutLength<&'a Vec<T>> {
+ #[inline]
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ for ref v in self.0.iter() {
+ v.write(writer)?;
+ }
+ Ok(())
+ }
+}
+
+// Reads elements until the stream is exhausted. A `ShortRead` that consumed zero bytes is
+// treated as a clean end-of-stream rather than an error; a `ShortRead` mid-element still fails.
+impl<T: MaybeReadable> Readable for WithoutLength<Vec<T>> {
+ #[inline]
+ fn read<R: Read>(mut reader: &mut R) -> Result<Self, DecodeError> {
+ let mut values = Vec::new();
+ loop {
+ let mut track_read = ReadTrackingReader::new(&mut reader);
+ match MaybeReadable::read(&mut track_read) {
+ Ok(Some(v)) => { values.push(v); },
+ Ok(None) => { },
+ // If we failed to read any bytes at all, we reached the end of our TLV
+ // stream and have simply exhausted all entries.
+ Err(ref e) if e == &DecodeError::ShortRead && !track_read.have_read => break,
+ Err(e) => return Err(e),
+ }
+ }
+ Ok(Self(values))
+ }
+}
+// Convenience conversion so a borrowed `Vec` can be wrapped without naming the type.
+impl<'a, T> From<&'a Vec<T>> for WithoutLength<&'a Vec<T>> {
+ fn from(v: &'a Vec<T>) -> Self { Self(v) }
+}
+
// HashMap
impl<K, V> Writeable for HashMap<K, V>
where K: Writeable + Eq + Hash,
}
}
+// Serializes a `ChainHash` as its raw 32 bytes, with no length prefix.
+impl Writeable for ChainHash {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ w.write_all(self.as_bytes())
+ }
+}
+
+// Deserializes a `ChainHash` by reading exactly 32 bytes.
+impl Readable for ChainHash {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let buf: [u8; 32] = Readable::read(r)?;
+ Ok(ChainHash::from(&buf[..]))
+ }
+}
+
impl Writeable for OutPoint {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.txid.write(w)?;
$field.write($stream)?;
};
($stream: expr, $type: expr, $field: expr, vec_type) => {
- encode_tlv!($stream, $type, $crate::util::ser::VecWriteWrapper(&$field), required);
+ encode_tlv!($stream, $type, $crate::util::ser::WithoutLength(&$field), required);
};
($stream: expr, $optional_type: expr, $optional_field: expr, option) => {
if let Some(ref field) = $optional_field {
field.write($stream)?;
}
};
+ ($stream: expr, $type: expr, $field: expr, (option, encoding: ($fieldty: ty, $encoding: ident))) => {
+ encode_tlv!($stream, $type, $field.map(|f| $encoding(f)), option);
+ };
+ ($stream: expr, $type: expr, $field: expr, (option, encoding: $fieldty: ty)) => {
+ encode_tlv!($stream, $type, $field, option);
+ };
}
macro_rules! encode_tlv_stream {
$len.0 += field_len;
};
($len: expr, $type: expr, $field: expr, vec_type) => {
- get_varint_length_prefixed_tlv_length!($len, $type, $crate::util::ser::VecWriteWrapper(&$field), required);
+ get_varint_length_prefixed_tlv_length!($len, $type, $crate::util::ser::WithoutLength(&$field), required);
};
($len: expr, $optional_type: expr, $optional_field: expr, option) => {
if let Some(ref field) = $optional_field {
($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {{
// no-op
}};
+ ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (option, encoding: $encoding: tt)) => {{
+ // no-op
+ }};
}
macro_rules! check_missing_tlv {
($last_seen_type: expr, $type: expr, $field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {{
// no-op
}};
+ ($last_seen_type: expr, $type: expr, $field: ident, (option, encoding: $encoding: tt)) => {{
+ // no-op
+ }};
}
macro_rules! decode_tlv {
$field = $crate::util::ser::Readable::read(&mut $reader)?;
}};
($reader: expr, $field: ident, vec_type) => {{
- let f: $crate::util::ser::VecReadWrapper<_> = $crate::util::ser::Readable::read(&mut $reader)?;
+ let f: $crate::util::ser::WithoutLength<Vec<_>> = $crate::util::ser::Readable::read(&mut $reader)?;
$field = Some(f.0);
}};
($reader: expr, $field: ident, option) => {{
($reader: expr, $field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {{
$field = Some($trait::read(&mut $reader $(, $read_arg)*)?);
}};
+ ($reader: expr, $field: ident, (option, encoding: ($fieldty: ty, $encoding: ident))) => {{
+ $field = {
+ let field: $encoding<$fieldty> = ser::Readable::read(&mut $reader)?;
+ Some(field.0)
+ };
+ }};
+ ($reader: expr, $field: ident, (option, encoding: $fieldty: ty)) => {{
+ decode_tlv!($reader, $field, option);
+ }};
}
// `$decode_custom_tlv` is a closure that may be optionally provided to handle custom message types.
// `Ok(false)` if the message type is unknown, and `Err(DecodeError)` if parsing fails.
macro_rules! decode_tlv_stream {
($stream: expr, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
+ $(, $decode_custom_tlv: expr)?) => { {
+ let rewind = |_, _| { unreachable!() };
+ use core::ops::RangeBounds;
+ decode_tlv_stream_range!(
+ $stream, .., rewind, {$(($type, $field, $fieldty)),*} $(, $decode_custom_tlv)?
+ );
+ } }
+}
+
+macro_rules! decode_tlv_stream_range {
+ ($stream: expr, $range: expr, $rewind: ident, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
$(, $decode_custom_tlv: expr)?) => { {
use $crate::ln::msgs::DecodeError;
let mut last_seen_type: Option<u64> = None;
// UnexpectedEof. This should in every case be largely cosmetic, but its nice to
// pass the TLV test vectors exactly, which requre this distinction.
let mut tracking_reader = ser::ReadTrackingReader::new(&mut stream_ref);
- match $crate::util::ser::Readable::read(&mut tracking_reader) {
+ match <$crate::util::ser::BigSize as $crate::util::ser::Readable>::read(&mut tracking_reader) {
Err(DecodeError::ShortRead) => {
if !tracking_reader.have_read {
break 'tlv_read;
}
},
Err(e) => return Err(e),
- Ok(t) => t,
+ Ok(t) => if $range.contains(&t.0) { t } else {
+ drop(tracking_reader);
+
+ // Assumes the type id is minimally encoded, which is enforced on read.
+ use $crate::util::ser::Writeable;
+ let bytes_read = t.serialized_length();
+ $rewind(stream_ref, bytes_read);
+ break 'tlv_read;
+ },
}
};
};
}
+// Declares a local variable for each listed TLV field (via `init_tlv_field_var!`) and then
+// reads all of them from `$reader` with `read_tlv_fields!`. Collapses the two-step boilerplate
+// previously repeated at every deserialization call site into a single invocation.
+macro_rules! init_and_read_tlv_fields {
+ ($reader: ident, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => {
+ $(
+ init_tlv_field_var!($field, $fieldty);
+ )*
+
+ read_tlv_fields!($reader, {
+ $(($type, $field, $fieldty)),*
+ });
+ }
+}
+
/// Implements Readable/Writeable for a struct storing it as a set of TLVs
/// If $fieldty is `required`, then $field is a required field that is not an Option nor a Vec.
/// If $fieldty is `option`, then $field is optional field.
impl $crate::util::ser::Readable for $st {
fn read<R: $crate::io::Read>(reader: &mut R) -> Result<Self, $crate::ln::msgs::DecodeError> {
- $(
- init_tlv_field_var!($field, $fieldty);
- )*
- read_tlv_fields!(reader, {
+ init_and_read_tlv_fields!(reader, {
$(($type, $field, $fieldty)),*
});
Ok(Self {
}
}
+/// Defines a struct for a TLV stream and a similar struct using references for non-primitive types,
+/// implementing [`Readable`] for the former and [`Writeable`] for the latter. Useful as an
+/// intermediary format when reading or writing a type encoded as a TLV stream. Note that each field
+/// representing a TLV record has its type wrapped with an [`Option`]. A tuple consisting of a type
+/// and a serialization wrapper may be given in place of a type when custom serialization is
+/// required.
+///
+/// [`Readable`]: crate::util::ser::Readable
+/// [`Writeable`]: crate::util::ser::Writeable
+macro_rules! tlv_stream {
+ ($name:ident, $nameref:ident, $range:expr, {
+ $(($type:expr, $field:ident : $fieldty:tt)),* $(,)*
+ }) => {
+ #[derive(Debug)]
+ pub(crate) struct $name {
+ $(
+ $field: Option<tlv_record_type!($fieldty)>,
+ )*
+ }
+
+ pub(crate) struct $nameref<'a> {
+ $(
+ pub(crate) $field: Option<tlv_record_ref_type!($fieldty)>,
+ )*
+ }
+
+ impl<'a> $crate::util::ser::Writeable for $nameref<'a> {
+ fn write<W: $crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), $crate::io::Error> {
+ encode_tlv_stream!(writer, {
+ $(($type, self.$field, (option, encoding: $fieldty))),*
+ });
+ Ok(())
+ }
+ }
+
+ impl $crate::util::ser::SeekReadable for $name {
+ fn read<R: $crate::io::Read + $crate::io::Seek>(reader: &mut R) -> Result<Self, $crate::ln::msgs::DecodeError> {
+ $(
+ init_tlv_field_var!($field, option);
+ )*
+ let rewind = |cursor: &mut R, offset: usize| {
+ cursor.seek($crate::io::SeekFrom::Current(-(offset as i64))).expect("");
+ };
+ decode_tlv_stream_range!(reader, $range, rewind, {
+ $(($type, $field, (option, encoding: $fieldty))),*
+ });
+
+ Ok(Self {
+ $(
+ $field: $field
+ ),*
+ })
+ }
+ }
+ }
+}
+
+// Maps a `tlv_stream!` field spec to the owned type stored in the read-side struct: a bare
+// type maps to itself, and a `(type, serialization-wrapper)` tuple maps to the inner type.
+macro_rules! tlv_record_type {
+ (($type:ty, $wrapper:ident)) => { $type };
+ ($type:ty) => { $type };
+}
+
+// Maps a `tlv_stream!` field spec to the type used in the reference (write-side) struct:
+// small primitives are held by value, all other types by `&'a` reference.
+macro_rules! tlv_record_ref_type {
+ (char) => { char };
+ (u8) => { u8 };
+ ((u16, $wrapper: ident)) => { u16 };
+ ((u32, $wrapper: ident)) => { u32 };
+ ((u64, $wrapper: ident)) => { u64 };
+ (($type:ty, $wrapper:ident)) => { &'a $type };
+ ($type:ty) => { &'a $type };
+}
+
macro_rules! _impl_writeable_tlv_based_enum_common {
($st: ident, $(($variant_id: expr, $variant_name: ident) =>
{$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
let id: u8 = $variant_id;
id.write(writer)?;
write_tlv_fields!(writer, {
- $(($type, $field, $fieldty)),*
+ $(($type, *$field, $fieldty)),*
});
}),*
$($st::$tuple_variant_name (ref field) => {
// Because read_tlv_fields creates a labeled loop, we cannot call it twice
// in the same function body. Instead, we define a closure and call it.
let f = || {
- $(
- init_tlv_field_var!($field, $fieldty);
- )*
- read_tlv_fields!(reader, {
+ init_and_read_tlv_fields!(reader, {
$(($type, $field, $fieldty)),*
});
Ok(Some($st::$variant_name {
// Because read_tlv_fields creates a labeled loop, we cannot call it twice
// in the same function body. Instead, we define a closure and call it.
let f = || {
- $(
- init_tlv_field_var!($field, $fieldty);
- )*
- read_tlv_fields!(reader, {
+ init_and_read_tlv_fields!(reader, {
$(($type, $field, $fieldty)),*
});
Ok($st::$variant_name {
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Utilities for strings.
+
+use core::fmt;
+
+/// A string that displays only printable characters, replacing control characters with
+/// [`core::char::REPLACEMENT_CHARACTER`].
+#[derive(Debug, PartialEq)]
+pub struct PrintableString<'a>(pub &'a str);
+
+impl<'a> fmt::Display for PrintableString<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ use core::fmt::Write;
+ // Substitute U+FFFD for each Unicode control character; everything else (including
+ // non-ASCII printables such as emoji) is written through unchanged.
+ for c in self.0.chars() {
+ let c = if c.is_control() { core::char::REPLACEMENT_CHARACTER } else { c };
+ f.write_char(c)?;
+ }
+
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::PrintableString;
+
+ #[test]
+ fn displays_printable_string() {
+ assert_eq!(
+ format!("{}", PrintableString("I \u{1F496} LDK!\t\u{26A1}")),
+ "I \u{1F496} LDK!\u{FFFD}\u{26A1}",
+ );
+ }
+}
use alloc::sync::Arc;
use core::mem;
-use crate::sync::{Condvar, Mutex};
+use crate::sync::{Condvar, Mutex, MutexGuard};
use crate::prelude::*;
condvar: Condvar,
}
+// Expands to an early-return: if the notify-pending flag in `$guard` is set, clear it and
+// return `$retval` from the enclosing function. `$guard` is a `MutexGuard` over
+// `(bool, Option<Arc<Mutex<FutureState>>>)`, i.e. a lock on `Notifier::notify_pending`.
+macro_rules! check_woken {
+ ($guard: expr, $retval: expr) => { {
+ if $guard.0 {
+ $guard.0 = false;
+ if $guard.1.as_ref().map(|l| l.lock().unwrap().complete).unwrap_or(false) {
+ // If we're about to return as woken, and the future state is marked complete, wipe
+ // the future state and let the next future wait until we get a new notify.
+ $guard.1.take();
+ }
+ return $retval;
+ }
+ } }
+}
+
impl Notifier {
pub(crate) fn new() -> Self {
Self {
}
}
+ /// Locks `notify_pending` and, if the stored `FutureState` has already delivered its
+ /// user-visible callbacks (`callbacks_made`), drops that state and clears the notify flag
+ /// so that a subsequently-fetched `Future` waits for a new `notify` instead of completing
+ /// immediately. Returns the held guard for the caller to continue using.
+ fn propagate_future_state_to_notify_flag(&self) -> MutexGuard<(bool, Option<Arc<Mutex<FutureState>>>)> {
+ let mut lock = self.notify_pending.lock().unwrap();
+ if let Some(existing_state) = &lock.1 {
+ if existing_state.lock().unwrap().callbacks_made {
+ // If the existing `FutureState` has completed and actually made callbacks,
+ // consider the notification flag to have been cleared and reset the future state.
+ lock.1.take();
+ lock.0 = false;
+ }
+ }
+ lock
+ }
+
pub(crate) fn wait(&self) {
loop {
- let mut guard = self.notify_pending.lock().unwrap();
- if guard.0 {
- guard.0 = false;
- return;
- }
+ let mut guard = self.propagate_future_state_to_notify_flag();
+ check_woken!(guard, ());
guard = self.condvar.wait(guard).unwrap();
- let result = guard.0;
- if result {
- guard.0 = false;
- return
- }
+ check_woken!(guard, ());
}
}
pub(crate) fn wait_timeout(&self, max_wait: Duration) -> bool {
let current_time = Instant::now();
loop {
- let mut guard = self.notify_pending.lock().unwrap();
- if guard.0 {
- guard.0 = false;
- return true;
- }
+ let mut guard = self.propagate_future_state_to_notify_flag();
+ check_woken!(guard, true);
guard = self.condvar.wait_timeout(guard, max_wait).unwrap().0;
+ check_woken!(guard, true);
// Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
// desired wait time has actually passed, and if not then restart the loop with a reduced wait
// time. Note that this logic can be highly simplified through the use of
// `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
// 1.42.0.
let elapsed = current_time.elapsed();
- let result = guard.0;
- if result || elapsed >= max_wait {
- guard.0 = false;
- return result;
+ if elapsed >= max_wait {
+ return false;
}
match max_wait.checked_sub(elapsed) {
- None => return result,
+ None => return false,
Some(_) => continue
}
}
/// Wake waiters, tracking that wake needs to occur even if there are currently no waiters.
pub(crate) fn notify(&self) {
let mut lock = self.notify_pending.lock().unwrap();
- lock.0 = true;
- if let Some(future_state) = lock.1.take() {
+ if let Some(future_state) = &lock.1 {
future_state.lock().unwrap().complete();
}
+ lock.0 = true;
mem::drop(lock);
self.condvar.notify_all();
}
/// Gets a [`Future`] that will get woken up with any waiters
pub(crate) fn get_future(&self) -> Future {
- let mut lock = self.notify_pending.lock().unwrap();
- if lock.0 {
- Future {
- state: Arc::new(Mutex::new(FutureState {
- callbacks: Vec::new(),
- complete: true,
- }))
- }
- } else if let Some(existing_state) = &lock.1 {
+ let mut lock = self.propagate_future_state_to_notify_flag();
+ if let Some(existing_state) = &lock.1 {
Future { state: Arc::clone(&existing_state) }
} else {
let state = Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
- complete: false,
+ complete: lock.0,
+ callbacks_made: false,
}));
lock.1 = Some(Arc::clone(&state));
Future { state }
}
pub(crate) struct FutureState {
- callbacks: Vec<Box<dyn FutureCallback>>,
+ // When we're tracking whether a callback counts as having woken the user's code, we check the
+ // first bool - set to false if we're just calling a Waker, and true if we're calling an actual
+ // user-provided function.
+ callbacks: Vec<(bool, Box<dyn FutureCallback>)>,
complete: bool,
+ callbacks_made: bool,
}
impl FutureState {
fn complete(&mut self) {
- for callback in self.callbacks.drain(..) {
+ for (counts_as_call, callback) in self.callbacks.drain(..) {
callback.call();
+ self.callbacks_made |= counts_as_call;
}
self.complete = true;
}
pub fn register_callback(&self, callback: Box<dyn FutureCallback>) {
let mut state = self.state.lock().unwrap();
if state.complete {
+ state.callbacks_made = true;
mem::drop(state);
callback.call();
} else {
- state.callbacks.push(callback);
+ state.callbacks.push((true, callback));
}
}
}
}
-mod std_future {
- use core::task::Waker;
- pub struct StdWaker(pub Waker);
- impl super::FutureCallback for StdWaker {
- fn call(&self) { self.0.wake_by_ref() }
- }
+use core::task::Waker;
+struct StdWaker(pub Waker);
+impl FutureCallback for StdWaker {
+ fn call(&self) { self.0.wake_by_ref() }
}
/// (C-not exported) as Rust Futures aren't usable in language bindings.
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut state = self.state.lock().unwrap();
if state.complete {
+ state.callbacks_made = true;
Poll::Ready(())
} else {
let waker = cx.waker().clone();
- state.callbacks.push(Box::new(std_future::StdWaker(waker)));
+ state.callbacks.push((false, Box::new(StdWaker(waker))));
Poll::Pending
}
}
assert!(callback.load(Ordering::SeqCst));
}
+ #[test]
+ fn notifier_future_completes_wake() {
+ // Previously, if we were only using the `Future` interface to learn when a `Notifier` has
+ // been notified, we'd never mark the notifier as not-awaiting-notify. This caused the
+ // `lightning-background-processor` to persist in a tight loop.
+ let notifier = Notifier::new();
+
+ // First check the simple case, ensuring if we get notified a new future isn't woken until
+ // a second `notify`.
+ let callback = Arc::new(AtomicBool::new(false));
+ let callback_ref = Arc::clone(&callback);
+ notifier.get_future().register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
+ assert!(!callback.load(Ordering::SeqCst));
+
+ notifier.notify();
+ assert!(callback.load(Ordering::SeqCst));
+
+ let callback = Arc::new(AtomicBool::new(false));
+ let callback_ref = Arc::clone(&callback);
+ notifier.get_future().register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
+ assert!(!callback.load(Ordering::SeqCst));
+
+ notifier.notify();
+ assert!(callback.load(Ordering::SeqCst));
+
+ // Then check the case where the future is fetched before the notification, but a callback
+ // is only registered after the `notify`, ensuring that it is still sufficient to ensure we
+ // don't get an instant-wake when we get a new future.
+ let future = notifier.get_future();
+ notifier.notify();
+
+ let callback = Arc::new(AtomicBool::new(false));
+ let callback_ref = Arc::clone(&callback);
+ future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
+ assert!(callback.load(Ordering::SeqCst));
+
+ let callback = Arc::new(AtomicBool::new(false));
+ let callback_ref = Arc::clone(&callback);
+ notifier.get_future().register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
+ assert!(!callback.load(Ordering::SeqCst));
+ }
+
+ #[test]
+ fn new_future_wipes_notify_bit() {
+ // Previously, if we were only using the `Future` interface to learn when a `Notifier` has
+ // been notified, we'd never mark the notifier as not-awaiting-notify if a `Future` is
+ // fetched after the notify bit has been set.
+ let notifier = Notifier::new();
+ notifier.notify();
+
+ let callback = Arc::new(AtomicBool::new(false));
+ let callback_ref = Arc::clone(&callback);
+ notifier.get_future().register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
+ assert!(callback.load(Ordering::SeqCst));
+
+ let callback = Arc::new(AtomicBool::new(false));
+ let callback_ref = Arc::clone(&callback);
+ notifier.get_future().register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
+ assert!(!callback.load(Ordering::SeqCst));
+
+ notifier.notify();
+ assert!(callback.load(Ordering::SeqCst));
+ }
+
#[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
+ callbacks_made: false,
}))
};
let callback = Arc::new(AtomicBool::new(false));
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
+ callbacks_made: false,
}))
};
future.state.lock().unwrap().complete();
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
+ callbacks_made: false,
}))
};
let mut second_future = Future { state: Arc::clone(&future.state) };
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
assert_eq!(Pin::new(&mut second_future).poll(&mut Context::from_waker(&second_waker)), Poll::Ready(()));
}
+
+ #[test]
+ fn test_dropped_future_doesnt_count() {
+ // Tests that if a Future gets drop'd before it is poll()ed `Ready` it doesn't count as
+ // having been woken, leaving the notify-required flag set.
+ let notifier = Notifier::new();
+ notifier.notify();
+
+ // If we get a future and don't touch it we're definitely still notify-required.
+ notifier.get_future();
+ assert!(notifier.wait_timeout(Duration::from_millis(1)));
+ assert!(!notifier.wait_timeout(Duration::from_millis(1)));
+
+ // Even if we poll'd once but didn't observe a `Ready`, we should be notify-required.
+ let mut future = notifier.get_future();
+ let (woken, waker) = create_waker();
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Pending);
+
+ notifier.notify();
+ assert!(woken.load(Ordering::SeqCst));
+ assert!(notifier.wait_timeout(Duration::from_millis(1)));
+
+ // However, once we do poll `Ready` it should wipe the notify-required flag.
+ let mut future = notifier.get_future();
+ let (woken, waker) = create_waker();
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Pending);
+
+ notifier.notify();
+ assert!(woken.load(Ordering::SeqCst));
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
+ assert!(!notifier.wait_timeout(Duration::from_millis(1)));
+ }
}
--- /dev/null
+## API Updates
+- `PaymentReceived` events now have `via_channel_id` and `via_user_channel_id`
+ fields exposing the hop over which we received an inbound payment. Also,
+ `ChannelDetails` now expose the currently observed number of `confirmations`
+ on the funding transaction.
+
+## Backwards Compatibility
+- Inbound payments with HTLCs pending when upgrading to 0.0.113 will result
+ in a `PaymentReceived` event with `user_channel_id` 0.