toolchain: ${{ matrix.toolchain }}
override: true
profile: minimal
+ - name: Pin dependencies
+ if: ${{ matrix.toolchain == '1.63.0' }}
+ run: |
+ cargo update -p tokio --precise "1.37.0" --verbose
+ cargo update -p tokio-macros --precise "2.2.0" --verbose
+ cargo update -p postgres-types --precise "0.2.6" --verbose
- name: Build on Rust ${{ matrix.toolchain }}
run: |
cargo build --verbose --color always
+++ /dev/null
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-version = 3
-
-[[package]]
-name = "async-trait"
-version = "0.1.58"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "autocfg"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
-
-[[package]]
-name = "base64"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
-
-[[package]]
-name = "bech32"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445"
-
-[[package]]
-name = "bitcoin"
-version = "0.29.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0694ea59225b0c5f3cb405ff3f670e4828358ed26aec49dc352f730f0cb1a8a3"
-dependencies = [
- "bech32",
- "bitcoin_hashes",
- "secp256k1",
-]
-
-[[package]]
-name = "bitcoin_hashes"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4"
-
-[[package]]
-name = "bitflags"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
-
-[[package]]
-name = "block-buffer"
-version = "0.10.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
-dependencies = [
- "generic-array",
-]
-
-[[package]]
-name = "byteorder"
-version = "1.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
-
-[[package]]
-name = "bytes"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be"
-
-[[package]]
-name = "cc"
-version = "1.0.79"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "chunked_transfer"
-version = "1.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cca491388666e04d7248af3f60f0c40cfb0991c72205595d7c396e3510207d1a"
-
-[[package]]
-name = "cpufeatures"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "crypto-common"
-version = "0.1.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
-dependencies = [
- "generic-array",
- "typenum",
-]
-
-[[package]]
-name = "digest"
-version = "0.10.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f"
-dependencies = [
- "block-buffer",
- "crypto-common",
- "subtle",
-]
-
-[[package]]
-name = "fallible-iterator"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
-
-[[package]]
-name = "futures"
-version = "0.3.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0"
-dependencies = [
- "futures-channel",
- "futures-core",
- "futures-executor",
- "futures-io",
- "futures-sink",
- "futures-task",
- "futures-util",
-]
-
-[[package]]
-name = "futures-channel"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2"
-dependencies = [
- "futures-core",
- "futures-sink",
-]
-
-[[package]]
-name = "futures-core"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"
-
-[[package]]
-name = "futures-executor"
-version = "0.3.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2"
-dependencies = [
- "futures-core",
- "futures-task",
- "futures-util",
-]
-
-[[package]]
-name = "futures-io"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"
-
-[[package]]
-name = "futures-macro"
-version = "0.3.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "futures-sink"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e"
-
-[[package]]
-name = "futures-task"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"
-
-[[package]]
-name = "futures-util"
-version = "0.3.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6"
-dependencies = [
- "futures-channel",
- "futures-core",
- "futures-io",
- "futures-macro",
- "futures-sink",
- "futures-task",
- "memchr",
- "pin-project-lite",
- "pin-utils",
- "slab",
-]
-
-[[package]]
-name = "generic-array"
-version = "0.14.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
-dependencies = [
- "typenum",
- "version_check",
-]
-
-[[package]]
-name = "getrandom"
-version = "0.2.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4"
-dependencies = [
- "cfg-if",
- "libc",
- "wasi",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "hmac"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
-dependencies = [
- "digest",
-]
-
-[[package]]
-name = "instant"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "itoa"
-version = "1.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
-
-[[package]]
-name = "libc"
-version = "0.2.141"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5"
-
-[[package]]
-name = "lightning"
-version = "0.0.114"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "800ec68a160529ba3ca12c5db629867c4a8de2df272792c1246602966a5b789b"
-dependencies = [
- "bitcoin",
-]
-
-[[package]]
-name = "lightning-block-sync"
-version = "0.0.114"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71a6fcb6ffc245cf0aee4a4654145a2a7e670be2d879c14b8ed456cdc4235255"
-dependencies = [
- "bitcoin",
- "chunked_transfer",
- "futures-util",
- "lightning",
- "serde",
- "serde_json",
-]
-
-[[package]]
-name = "lightning-net-tokio"
-version = "0.0.114"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19197703db1195ed908ccd84cd76baaa5916e0d22750a2e1b7188f66516812bd"
-dependencies = [
- "bitcoin",
- "lightning",
- "tokio",
-]
-
-[[package]]
-name = "lock_api"
-version = "0.4.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
-dependencies = [
- "autocfg",
- "scopeguard",
-]
-
-[[package]]
-name = "log"
-version = "0.4.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "md-5"
-version = "0.10.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca"
-dependencies = [
- "digest",
-]
-
-[[package]]
-name = "memchr"
-version = "2.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
-
-[[package]]
-name = "mio"
-version = "0.7.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc"
-dependencies = [
- "libc",
- "log",
- "miow",
- "ntapi",
- "winapi",
-]
-
-[[package]]
-name = "miow"
-version = "0.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "ntapi"
-version = "0.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "num_cpus"
-version = "1.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
-dependencies = [
- "hermit-abi",
- "libc",
-]
-
-[[package]]
-name = "once_cell"
-version = "1.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f7254b99e31cad77da24b08ebf628882739a608578bb1bcdfc1f9c21260d7c0"
-
-[[package]]
-name = "parking_lot"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
-dependencies = [
- "instant",
- "lock_api",
- "parking_lot_core",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
-dependencies = [
- "cfg-if",
- "instant",
- "libc",
- "redox_syscall",
- "smallvec",
- "winapi",
-]
-
-[[package]]
-name = "percent-encoding"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
-
-[[package]]
-name = "phf"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259"
-dependencies = [
- "phf_shared",
-]
-
-[[package]]
-name = "phf_shared"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096"
-dependencies = [
- "siphasher",
-]
-
-[[package]]
-name = "pin-project-lite"
-version = "0.2.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
-
-[[package]]
-name = "pin-utils"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
-
-[[package]]
-name = "postgres-protocol"
-version = "0.6.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "878c6cbf956e03af9aa8204b407b9cbf47c072164800aa918c516cd4b056c50c"
-dependencies = [
- "base64",
- "byteorder",
- "bytes",
- "fallible-iterator",
- "hmac",
- "md-5",
- "memchr",
- "rand",
- "sha2",
- "stringprep",
-]
-
-[[package]]
-name = "postgres-types"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebd6e8b7189a73169290e89bd24c771071f1012d8fe6f738f5226531f0b03d89"
-dependencies = [
- "bytes",
- "fallible-iterator",
- "postgres-protocol",
-]
-
-[[package]]
-name = "ppv-lite86"
-version = "0.2.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.56"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
-dependencies = [
- "unicode-ident",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "rand"
-version = "0.8.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
-dependencies = [
- "libc",
- "rand_chacha",
- "rand_core",
-]
-
-[[package]]
-name = "rand_chacha"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
-dependencies = [
- "ppv-lite86",
- "rand_core",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.6.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "rapid-gossip-sync-server"
-version = "0.1.0"
-dependencies = [
- "bitcoin",
- "futures",
- "lightning",
- "lightning-block-sync",
- "lightning-net-tokio",
- "tokio",
- "tokio-postgres",
-]
-
-[[package]]
-name = "redox_syscall"
-version = "0.2.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
-dependencies = [
- "bitflags",
-]
-
-[[package]]
-name = "ryu"
-version = "1.0.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
-
-[[package]]
-name = "scopeguard"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-
-[[package]]
-name = "secp256k1"
-version = "0.24.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62"
-dependencies = [
- "bitcoin_hashes",
- "secp256k1-sys",
-]
-
-[[package]]
-name = "secp256k1-sys"
-version = "0.6.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b"
-dependencies = [
- "cc",
-]
-
-[[package]]
-name = "serde"
-version = "1.0.147"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.147"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.95"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744"
-dependencies = [
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "sha2"
-version = "0.10.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0"
-dependencies = [
- "cfg-if",
- "cpufeatures",
- "digest",
-]
-
-[[package]]
-name = "signal-hook-registry"
-version = "1.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "siphasher"
-version = "0.3.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"
-
-[[package]]
-name = "slab"
-version = "0.4.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d"
-dependencies = [
- "autocfg",
-]
-
-[[package]]
-name = "smallvec"
-version = "1.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
-
-[[package]]
-name = "socket2"
-version = "0.4.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662"
-dependencies = [
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "stringprep"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1"
-dependencies = [
- "unicode-bidi",
- "unicode-normalization",
-]
-
-[[package]]
-name = "subtle"
-version = "2.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
-
-[[package]]
-name = "syn"
-version = "1.0.109"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "tinyvec"
-version = "1.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50"
-dependencies = [
- "tinyvec_macros",
-]
-
-[[package]]
-name = "tinyvec_macros"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
-
-[[package]]
-name = "tokio"
-version = "1.14.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9d0183f6f6001549ab68f8c7585093bb732beefbcf6d23a10b9b95c73a1dd49"
-dependencies = [
- "autocfg",
- "bytes",
- "libc",
- "memchr",
- "mio",
- "num_cpus",
- "once_cell",
- "parking_lot",
- "pin-project-lite",
- "signal-hook-registry",
- "tokio-macros",
- "winapi",
-]
-
-[[package]]
-name = "tokio-macros"
-version = "1.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "tokio-postgres"
-version = "0.7.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b6c8b33df661b548dcd8f9bf87debb8c56c05657ed291122e1188698c2ece95"
-dependencies = [
- "async-trait",
- "byteorder",
- "bytes",
- "fallible-iterator",
- "futures",
- "log",
- "parking_lot",
- "percent-encoding",
- "phf",
- "pin-project-lite",
- "postgres-protocol",
- "postgres-types",
- "socket2",
- "tokio",
- "tokio-util",
-]
-
-[[package]]
-name = "tokio-util"
-version = "0.6.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507"
-dependencies = [
- "bytes",
- "futures-core",
- "futures-sink",
- "log",
- "pin-project-lite",
- "tokio",
-]
-
-[[package]]
-name = "typenum"
-version = "1.16.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
-
-[[package]]
-name = "unicode-bidi"
-version = "0.3.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460"
-
-[[package]]
-name = "unicode-ident"
-version = "1.0.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
-
-[[package]]
-name = "unicode-normalization"
-version = "0.1.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921"
-dependencies = [
- "tinyvec",
-]
-
-[[package]]
-name = "version_check"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
-
-[[package]]
-name = "wasi"
-version = "0.11.0+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
edition = "2021"
[dependencies]
-bitcoin = "0.30"
-hex-conservative = "0.2"
-lightning = { version = "0.0.121" }
-lightning-block-sync = { version = "0.0.121", features=["rest-client"] }
-lightning-net-tokio = { version = "0.0.121" }
+bitcoin = "0.32.2"
+hex-conservative = "0.2.1"
+lightning = { version = "0.0.124" }
+lightning-block-sync = { version = "0.0.124", features=["rest-client"] }
+lightning-net-tokio = { version = "0.0.124" }
tokio = { version = "1.25", features = ["full"] }
tokio-postgres = { version = "=0.7.5" }
futures = "0.3"
[dev-dependencies]
-lightning = { version = "0.0.121", features = ["_test_utils"] }
-lightning-rapid-gossip-sync = { version = "0.0.121" }
+lightning = { version = "0.0.124", features = ["_test_utils"] }
+lightning-rapid-gossip-sync = { version = "0.0.124" }
[profile.dev]
panic = "abort"
use crate::hex_utils;
use std::env;
-use std::io::Cursor;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
+use bitcoin::io::Cursor;
use bitcoin::Network;
use bitcoin::hashes::hex::FromHex;
use bitcoin::secp256k1::PublicKey;
use lightning_block_sync::http::HttpEndpoint;
use tokio_postgres::Config;
-pub(crate) const SCHEMA_VERSION: i32 = 13;
+pub(crate) const SCHEMA_VERSION: i32 = 14;
pub(crate) const SYMLINK_GRANULARITY_INTERVAL: u32 = 3600 * 3; // three hours
pub(crate) const MAX_SNAPSHOT_SCOPE: u32 = 3600 * 24 * 21; // three weeks
// generate symlinks based on a 3-hour-granularity
/// That reminder may be either in the form of a channel announcement, or in the form of empty
/// updates in both directions.
pub(crate) const CHANNEL_REMINDER_AGE: Duration = Duration::from_secs(6 * 24 * 60 * 60);
+
+/// The interval after which graph data gets pruned, counted from when it was first seen.
+/// This should match the LDK default pruning interval, which is 14 days.
+pub(crate) const PRUNE_INTERVAL: Duration = Duration::from_secs(14 * 24 * 60 * 60);
+
+/// Maximum number of default features to calculate for node announcements
+pub(crate) const NODE_DEFAULT_FEATURE_COUNT: u8 = 6;
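A quick sanity check on how the new intervals interact (illustrative only, with the durations restated as plain seconds): reminders must fire well before pruning would drop the data from clients' graphs.

```rust
// Illustrative only, not part of the diff: CHANNEL_REMINDER_AGE (6 days) must
// stay below PRUNE_INTERVAL (14 days) so clients are reminded of a channel
// before LDK's staleness pruning removes it.
const CHANNEL_REMINDER_AGE_SECS: u64 = 6 * 24 * 60 * 60;
const PRUNE_INTERVAL_SECS: u64 = 14 * 24 * 60 * 60;
const _: () = assert!(CHANNEL_REMINDER_AGE_SECS < PRUNE_INTERVAL_SECS);
```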
+
/// The number of successful peer connections to await prior to continuing to gossip storage.
/// The application will still work if the number of specified peers is lower, as long as there is
/// at least one successful peer connection, but it may result in long startup times.
)"
}
+pub(crate) fn db_node_announcement_table_creation_query() -> &'static str {
+ "CREATE TABLE IF NOT EXISTS node_announcements (
+ id SERIAL PRIMARY KEY,
+ public_key varchar(66) NOT NULL,
+ features BYTEA NOT NULL,
+ socket_addresses BYTEA NOT NULL,
+ timestamp bigint NOT NULL,
+ announcement_signed BYTEA,
+ seen timestamp NOT NULL DEFAULT NOW()
+ )"
+}
+
pub(crate) fn db_index_creation_query() -> &'static str {
"
CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
CREATE UNIQUE INDEX IF NOT EXISTS channel_updates_key ON channel_updates (short_channel_id, direction, timestamp);
CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen);
CREATE INDEX IF NOT EXISTS channel_updates_scid_asc_timestamp_desc ON channel_updates(short_channel_id ASC, timestamp DESC);
+ CREATE INDEX IF NOT EXISTS node_announcements_seen_pubkey ON node_announcements(seen, public_key);
"
}
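For orientation, here is a minimal sketch of how a persister might insert a row into the new node_announcements table. The helper and its parameters are hypothetical, assuming a connected tokio_postgres::Client and pre-serialized column values; `id` and `seen` fall back to their column defaults.

```rust
// Hypothetical helper, not part of the diff: inserts one row matching the
// node_announcements columns created above.
async fn persist_node_announcement(
    client: &tokio_postgres::Client,
    public_key: &str,        // 66-char hex pubkey, matching varchar(66)
    features: &[u8],         // serialized NodeFeatures (BYTEA)
    socket_addresses: &[u8], // serialized address list (BYTEA)
    timestamp: i64,          // announcement timestamp (bigint)
    announcement_signed: &[u8],
) -> Result<(), tokio_postgres::Error> {
    client.execute(
        "INSERT INTO node_announcements (public_key, features, socket_addresses, timestamp, announcement_signed) VALUES ($1, $2, $3, $4, $5)",
        &[&public_key, &features, &socket_addresses, &timestamp, &announcement_signed],
    ).await?;
    Ok(())
}
```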
let announcement: Vec<u8> = row.get("announcement_signed");
let tx_ref = &tx;
updates.push(async move {
- let scid = ChannelAnnouncement::read(&mut Cursor::new(announcement)).unwrap().contents.short_channel_id as i64;
+ let scid = ChannelAnnouncement::read(&mut Cursor::new(&announcement)).unwrap().contents.short_channel_id as i64;
assert!(scid > 0); // Will roll over in some 150 years or so
tx_ref.execute("UPDATE channel_announcements SET short_channel_id = $1 WHERE id = $2", &[&scid, &id]).await.unwrap();
});
tx.execute("UPDATE config SET db_schema = 13 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
}
+ if schema >= 1 && schema <= 13 {
+ let tx = client.transaction().await.unwrap();
+ tx.execute("UPDATE config SET db_schema = 14 WHERE id = 1", &[]).await.unwrap();
+ tx.commit().await.unwrap();
+ }
if schema <= 1 || schema > SCHEMA_VERSION {
panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
}
// Set the environment variable, including a repeated comma, leading space, and trailing comma.
std::env::set_var("LN_PEERS", "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735,, 035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227@170.75.163.210:9735,");
let peers = ln_peers();
-
+
// Assert output is as expected
assert_eq!(
peers,
use crate::verifier::ChainVerifier;
pub(crate) struct GossipCounter {
+ pub(crate) node_announcements: u64,
pub(crate) channel_announcements: u64,
pub(crate) channel_updates: u64,
pub(crate) channel_updates_without_htlc_max_msats: u64,
impl GossipCounter {
pub(crate) fn new() -> Self {
Self {
+ node_announcements: 0,
channel_announcements: 0,
channel_updates: 0,
channel_updates_without_htlc_max_msats: 0,
}
}
+ fn new_node_announcement(&self, msg: NodeAnnouncement) {
+ {
+ let mut counter = self.counter.write().unwrap();
+ counter.node_announcements += 1;
+ }
+
+ let gossip_message = GossipMessage::NodeAnnouncement(msg, None);
+ if let Err(err) = self.sender.try_send(gossip_message) {
+ let gossip_message = match err { TrySendError::Full(msg)|TrySendError::Closed(msg) => msg };
+ tokio::task::block_in_place(move || { tokio::runtime::Handle::current().block_on(async move {
+ self.sender.send(gossip_message).await.unwrap();
+ })});
+ }
+ }
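The send path above follows the router's existing pattern: attempt a non-blocking try_send, and only block the current OS thread when the channel buffer is full. A self-contained sketch of that pattern (illustrative; assumes a bounded tokio mpsc channel on the multi-threaded runtime, since block_in_place panics on the current-thread flavor):

```rust
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TrySendError;

// Illustrative only: try_send avoids blocking in the common case; on a full
// buffer, block_in_place lets this worker thread wait for capacity without
// starving the async runtime.
fn send_or_block<T>(sender: &mpsc::Sender<T>, message: T) {
    if let Err(err) = sender.try_send(message) {
        let message = match err { TrySendError::Full(msg) | TrySendError::Closed(msg) => msg };
        tokio::task::block_in_place(move || {
            tokio::runtime::Handle::current().block_on(async move {
                sender.send(message).await.expect("receiver dropped");
            })
        });
    }
}
```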
+
fn new_channel_update(&self, msg: ChannelUpdate) {
self.counter.write().unwrap().channel_updates += 1;
let gossip_message = GossipMessage::ChannelUpdate(msg, None);
MessageSendEvent::BroadcastChannelAnnouncement { msg, .. } => {
self.new_channel_announcement(msg);
},
- MessageSendEvent::BroadcastNodeAnnouncement { .. } => {},
+ MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
+ self.new_node_announcement(msg);
+ },
MessageSendEvent::BroadcastChannelUpdate { msg } => {
self.new_channel_update(msg);
},
impl<L: Deref + Clone + Send + Sync> RoutingMessageHandler for GossipRouter<L> where L::Target: Logger {
fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result<bool, LightningError> {
- self.native_router.handle_node_announcement(msg)
+ let res = self.native_router.handle_node_announcement(msg)?;
+ self.new_node_announcement(msg.clone());
+ Ok(res)
}
fn handle_channel_announcement(&self, msg: &ChannelAnnouncement) -> Result<bool, LightningError> {
use crate::lookup::DeltaSet;
use crate::persistence::GossipPersister;
-use crate::serialization::UpdateSerialization;
+use crate::serialization::{MutatedNodeProperties, NodeSerializationStrategy, SerializationSet, UpdateSerialization};
use crate::snapshot::Snapshotter;
use crate::types::RGSSLogger;
/// sync formats arise in the future.
///
/// The fourth byte is the protocol version in case our format gets updated.
-const GOSSIP_PREFIX: [u8; 4] = [76, 68, 75, 1];
+const GOSSIP_PREFIX: [u8; 3] = [76, 68, 75];
pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
network_graph: Arc<NetworkGraph<L>>,
pub struct SerializedResponse {
pub data: Vec<u8>,
pub message_count: u32,
- pub announcement_count: u32,
+ pub node_announcement_count: u32,
+ /// Despite the name, the count of node announcements that have associated updates, be those
+ /// features, addresses, or both
+ pub node_update_count: u32,
+ pub node_feature_update_count: u32,
+ pub node_address_update_count: u32,
+ pub channel_announcement_count: u32,
pub update_count: u32,
pub update_count_full: u32,
pub update_count_incremental: u32,
blob
}
-async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, logger: L) -> SerializedResponse where L::Target: Logger {
+async fn calculate_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, snapshot_reference_timestamp: Option<u64>, logger: L) -> SerializationSet where L::Target: Logger {
let client = connect_to_db().await;
network_graph.remove_stale_channels_and_tracking();
- let mut output: Vec<u8> = vec![];
- let snapshot_interval = config::snapshot_generation_interval();
-
// set a flag if the chain hash is prepended
// chain hash only necessary if either channel announcements or non-incremental updates are present
// for announcement-free incremental-only updates, chain hash can be skipped
+ let mut delta_set = DeltaSet::new();
+ lookup::fetch_channel_announcements(&mut delta_set, Arc::clone(&network_graph), &client, last_sync_timestamp, snapshot_reference_timestamp, logger.clone()).await;
+ log_info!(logger, "announcement channel count: {}", delta_set.len());
+ lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
+ log_info!(logger, "update-fetched channel count: {}", delta_set.len());
+ let node_delta_set = lookup::fetch_node_updates(network_graph, &client, last_sync_timestamp, snapshot_reference_timestamp, logger.clone()).await;
+ log_info!(logger, "update-fetched node count: {}", node_delta_set.len());
+ lookup::filter_delta_set(&mut delta_set, logger.clone());
+ log_info!(logger, "update-filtered channel count: {}", delta_set.len());
+ serialization::serialize_delta_set(delta_set, node_delta_set, last_sync_timestamp)
+}
+
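Splitting the old serialize_delta into an async calculate_delta plus a synchronous serialize_delta lets one delta computation be rendered at several protocol versions. A hypothetical call site (the wrapper function is assumed, reusing the module's imports):

```rust
// Hypothetical call site, not part of the diff: compute the delta once, then
// serialize it for both the legacy (v1) and node-data-bearing (v2) formats.
async fn snapshot_both_versions<L: Deref + Clone>(
    network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, logger: L,
) -> (SerializedResponse, SerializedResponse) where L::Target: Logger {
    let delta = calculate_delta(network_graph, last_sync_timestamp, None, logger.clone()).await;
    let v1 = serialize_delta(&delta, 1, logger.clone());
    let v2 = serialize_delta(&delta, 2, logger);
    (v1, v2)
}
```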
+fn serialize_delta<L: Deref + Clone>(serialization_details: &SerializationSet, serialization_version: u8, logger: L) -> SerializedResponse where L::Target: Logger {
+ let mut output: Vec<u8> = vec![];
+ let snapshot_interval = config::snapshot_generation_interval();
+
let mut node_id_set: HashSet<NodeId> = HashSet::new();
let mut node_id_indices: HashMap<NodeId, usize> = HashMap::new();
let mut node_ids: Vec<NodeId> = Vec::new();
node_id_indices[&node_id]
};
- let mut delta_set = DeltaSet::new();
- lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, logger.clone()).await;
- log_info!(logger, "announcement channel count: {}", delta_set.len());
- lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
- log_info!(logger, "update-fetched channel count: {}", delta_set.len());
- lookup::filter_delta_set(&mut delta_set, logger.clone());
- log_info!(logger, "update-filtered channel count: {}", delta_set.len());
- let serialization_details = serialization::serialize_delta_set(delta_set, last_sync_timestamp);
-
// process announcements
// write the number of channel announcements to the output
let announcement_count = serialization_details.announcements.len() as u32;
announcement_count.write(&mut output).unwrap();
let mut previous_announcement_scid = 0;
- for current_announcement in serialization_details.announcements {
+ for current_announcement in &serialization_details.announcements {
let id_index_1 = get_node_id_index(current_announcement.node_id_1);
let id_index_2 = get_node_id_index(current_announcement.node_id_2);
let mut stripped_announcement = serialization::serialize_stripped_channel_announcement(&current_announcement, id_index_1, id_index_2, previous_announcement_scid);
let update_count = serialization_details.updates.len() as u32;
update_count.write(&mut output).unwrap();
- let default_update_values = serialization_details.full_update_defaults;
+ let default_update_values = &serialization_details.full_update_defaults;
if update_count > 0 {
default_update_values.cltv_expiry_delta.write(&mut output).unwrap();
default_update_values.htlc_minimum_msat.write(&mut output).unwrap();
let mut update_count_full = 0;
let mut update_count_incremental = 0;
- for current_update in serialization_details.updates {
+ for current_update in &serialization_details.updates {
match &current_update {
UpdateSerialization::Full(_) => {
update_count_full += 1;
let message_count = announcement_count + update_count;
let mut prefixed_output = GOSSIP_PREFIX.to_vec();
+ prefixed_output.push(serialization_version);
// always write the chain hash
serialization_details.chain_hash.write(&mut prefixed_output).unwrap();
let serialized_seen_timestamp = latest_seen_timestamp.saturating_sub(overflow_seconds);
serialized_seen_timestamp.write(&mut prefixed_output).unwrap();
+ if serialization_version >= 2 { // serialize the most common node features
+ for mutated_node_id in serialization_details.node_mutations.keys() {
+ // consider mutated nodes outside channel announcements
+ get_node_id_index(mutated_node_id.clone());
+ }
+
+ let default_feature_count = serialization_details.node_announcement_feature_defaults.len() as u8;
+ debug_assert!(default_feature_count <= config::NODE_DEFAULT_FEATURE_COUNT, "Default feature count cannot exceed maximum");
+ default_feature_count.write(&mut prefixed_output).unwrap();
+
+ for current_feature in &serialization_details.node_announcement_feature_defaults {
+ current_feature.write(&mut prefixed_output).unwrap();
+ }
+ }
+
let node_id_count = node_ids.len() as u32;
node_id_count.write(&mut prefixed_output).unwrap();
+ let mut node_update_count = 0u32;
+ let mut node_feature_update_count = 0u32;
+ let mut node_address_update_count = 0u32;
+
for current_node_id in node_ids {
- current_node_id.write(&mut prefixed_output).unwrap();
+ let mut current_node_delta_serialization: Vec<u8> = Vec::new();
+ current_node_id.write(&mut current_node_delta_serialization).unwrap();
+
+ if serialization_version >= 2 {
+ if let Some(node_delta) = serialization_details.node_mutations.get(¤t_node_id) {
+ let strategy = node_delta.strategy.as_ref().unwrap();
+ let mut node_has_update = false;
+
+ /*
+ Bitmap:
+ 7: expect extra data after the pubkey (a u16 for the count, and then that number of bytes)
+ 6: this node is a reminder; no mutated details follow
+ 5-3: index of new features among the defaults (1-6). If the index is 7 (all 3 bits set),
+ the feature set is outside the present default range and follows inline. 0 means no
+ feature changes.
+ 2: addresses have changed
+
+ 1: used for all keys
+ 0: used for odd keys
+ */
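+
+ // Worked example (illustrative, not part of the diff): for an odd pubkey
+ // (leading byte 0x03) whose addresses changed and whose features match the
+ // second default feature set (stored value 2), the leading byte becomes
+ // 0x03 | (1 << 2) | (2 << 3) = 0x17.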
+
+ match strategy {
+ NodeSerializationStrategy::Mutated(MutatedNodeProperties { addresses: true, .. }) | NodeSerializationStrategy::Full => {
+ let address_set = &node_delta.latest_details.as_ref().unwrap().addresses;
+ let mut address_serialization = Vec::new();
+
+ // we don't know a priori how many are <= 255 bytes
+ let mut total_address_count = 0u8;
+
+ for address in address_set.iter() {
+ if total_address_count == u8::MAX {
+ // don't serialize more than 255 addresses
+ break;
+ }
+ if let Ok(serialized_length) = u8::try_from(address.serialized_length()) {
+ total_address_count += 1;
+ serialized_length.write(&mut address_serialization).unwrap();
+ address.write(&mut address_serialization).unwrap();
+ };
+ }
+
+ node_address_update_count += 1;
+ node_has_update = true;
+
+ // signal the presence of node addresses
+ current_node_delta_serialization[0] |= 1 << 2;
+ // serialize the actual addresses and count
+ total_address_count.write(&mut current_node_delta_serialization).unwrap();
+ current_node_delta_serialization.append(&mut address_serialization);
+ },
+ _ => {}
+ }
+
+ match strategy {
+ NodeSerializationStrategy::Mutated(MutatedNodeProperties { features: true, .. }) | NodeSerializationStrategy::Full => {
+ let latest_features = &node_delta.latest_details.as_ref().unwrap().features;
+ node_feature_update_count += 1;
+ node_has_update = true;
+
+ // are these features among the most common ones?
+ if let Some(index) = serialization_details.node_announcement_feature_defaults.iter().position(|f| f == latest_features) {
+ // this feature set is among the 6 defaults
+ current_node_delta_serialization[0] |= ((index + 1) as u8) << 3;
+ } else {
+ current_node_delta_serialization[0] |= 0b_0011_1000; // 7 << 3
+ latest_features.write(&mut current_node_delta_serialization).unwrap();
+ }
+ },
+ _ => {}
+ }
+
+ if node_has_update {
+ node_update_count += 1;
+ } else if let NodeSerializationStrategy::Reminder = strategy {
+ current_node_delta_serialization[0] |= 1 << 6;
+ }
+ }
+ }
+
+ prefixed_output.append(&mut current_node_delta_serialization);
}
prefixed_output.append(&mut output);
SerializedResponse {
data: prefixed_output,
message_count,
- announcement_count,
+ node_announcement_count: node_id_count,
+ node_update_count,
+ node_feature_update_count,
+ node_address_update_count,
+ channel_announcement_count: announcement_count,
update_count,
update_count_full,
update_count_incremental,
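With GOSSIP_PREFIX now holding only the three bytes spelling "LDK" and the version byte appended separately, a client-side sanity check might look like this (illustrative sketch; the function is not part of the codebase):

```rust
// Illustrative only: the first three bytes spell "LDK" ([76, 68, 75]); the
// fourth byte carries the protocol version, appended after the prefix above.
fn parse_snapshot_version(data: &[u8]) -> Option<u8> {
    if data.len() >= 4 && &data[..3] == b"LDK" {
        Some(data[3])
    } else {
        None
    }
}
```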
-use std::collections::{BTreeMap, HashSet};
-use std::io::Cursor;
+use std::collections::{BTreeMap, HashMap, HashSet};
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Instant, SystemTime, UNIX_EPOCH};
-use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
-use lightning::routing::gossip::NetworkGraph;
+use bitcoin::io::Cursor;
+
+use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, SocketAddress, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
+use lightning::routing::gossip::{NetworkGraph, NodeId};
use lightning::util::ser::Readable;
use tokio_postgres::Client;
use futures::StreamExt;
-use lightning::{log_gossip, log_info};
+use hex_conservative::DisplayHex;
+use lightning::{log_debug, log_gossip, log_info};
+use lightning::ln::features::NodeFeatures;
use lightning::util::logger::Logger;
use crate::config;
-use crate::serialization::MutatedProperties;
+use crate::serialization::{MutatedNodeProperties, MutatedProperties, NodeSerializationStrategy};
/// The delta set needs to be a BTreeMap so the keys are sorted.
/// That way, the scids in the response automatically grow monotonically
pub(super) type DeltaSet = BTreeMap<u64, ChannelDelta>;
+pub(super) type NodeDeltaSet = HashMap<NodeId, NodeDelta>;
pub(super) struct AnnouncementDelta {
pub(super) seen: u32,
}
pub(super) struct DirectedUpdateDelta {
+ /// the last update we saw prior to the user-provided timestamp
pub(super) last_update_before_seen: Option<UpdateDelta>,
- pub(super) mutated_properties: MutatedProperties,
+ /// the latest update we saw overall
pub(super) latest_update_after_seen: Option<UpdateDelta>,
- pub(super) serialization_update_flags: Option<u8>,
+ /// the set of all mutated properties across all updates between the last seen by the user and
+ /// the latest one known to us
+ pub(super) mutated_properties: MutatedProperties,
+ /// Specifically for reminder updates, the flag-only value to send to the client
+ pub(super) serialization_update_flags: Option<u8>
}
pub(super) struct ChannelDelta {
pub(super) requires_reminder: bool,
}
+pub(super) struct NodeDelta {
+ /// The most recently received, but new-to-the-client, node details
+ pub(super) latest_details: Option<NodeDetails>,
+
+ /// How should this delta be serialized?
+ pub(super) strategy: Option<NodeSerializationStrategy>,
+
+ /// The most recent node details that the client would have seen already
+ pub(super) last_details_before_seen: Option<NodeDetails>
+}
+
+pub(super) struct NodeDetails {
+ pub(super) seen: Option<u32>,
+ pub(super) features: NodeFeatures,
+ pub(super) addresses: HashSet<SocketAddress>
+}
+
impl Default for ChannelDelta {
fn default() -> Self {
Self {
}
}
+impl Default for NodeDelta {
+ fn default() -> Self {
+ Self {
+ latest_details: None,
+ last_details_before_seen: None,
+ strategy: None,
+ }
+ }
+}
+
impl Default for DirectedUpdateDelta {
fn default() -> Self {
Self {
}
}
+fn should_snapshot_include_reminders<L: Deref>(last_sync_timestamp: u32, current_timestamp: u64, logger: &L) -> bool where L::Target: Logger {
+ let current_hour = current_timestamp / 3600;
+ let current_day = current_timestamp / (24 * 3600);
+
+ log_debug!(logger, "Current day index: {}", current_day);
+ log_debug!(logger, "Current hour: {}", current_hour);
+
+ // every 5th day at midnight
+ let is_reminder_hour = (current_hour % 24) == 0;
+ let is_reminder_day = (current_day % 5) == 0;
+
+ let snapshot_scope = current_timestamp.saturating_sub(last_sync_timestamp as u64);
+ let is_reminder_scope = snapshot_scope > (50 * 3600);
+ log_debug!(logger, "Snapshot scope: {}s", snapshot_scope);
+
+ (is_reminder_hour && is_reminder_day) || is_reminder_scope
+}
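To make the schedule concrete, here is a self-contained restatement of the logic above (illustrative only, without the logger plumbing):

```rust
// Illustrative mirror of should_snapshot_include_reminders: reminders go out
// at midnight UTC on every 5th day, or whenever the sync gap exceeds 50 hours.
fn is_reminder_time(last_sync_timestamp: u32, current_timestamp: u64) -> bool {
    let is_reminder_hour = (current_timestamp / 3600) % 24 == 0;
    let is_reminder_day = (current_timestamp / 86_400) % 5 == 0;
    let snapshot_scope = current_timestamp.saturating_sub(last_sync_timestamp as u64);
    (is_reminder_hour && is_reminder_day) || snapshot_scope > 50 * 3600
}

// e.g. current_timestamp = 432_000 (day 5, midnight UTC) => true;
// a 60-hour gap since last_sync_timestamp => true via the scope rule.
```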
+
/// Fetch all the channel announcements that are presently in the network graph, regardless of
/// whether they had been seen before.
/// Also include all announcements for which the first update was announced
/// after `last_sync_timestamp`
-pub(super) async fn fetch_channel_announcements<L: Deref>(delta_set: &mut DeltaSet, network_graph: Arc<NetworkGraph<L>>, client: &Client, last_sync_timestamp: u32, logger: L) where L::Target: Logger {
+pub(super) async fn fetch_channel_announcements<L: Deref>(delta_set: &mut DeltaSet, network_graph: Arc<NetworkGraph<L>>, client: &Client, last_sync_timestamp: u32, snapshot_reference_timestamp: Option<u64>, logger: L) where L::Target: Logger {
log_info!(logger, "Obtaining channel ids from network graph");
let channel_ids = {
let read_only_graph = network_graph.read_only();
log_info!(logger, "Retrieved read-only network graph copy");
let channel_iterator = read_only_graph.channels().unordered_iter();
channel_iterator
- .filter(|c| c.1.announcement_message.is_some())
+ .filter(|c| c.1.announcement_message.is_some() && c.1.one_to_two.is_some() && c.1.two_to_one.is_some())
.map(|c| c.1.announcement_message.as_ref().unwrap().contents.short_channel_id as i64)
.collect::<Vec<_>>()
};
log_info!(logger, "Last sync timestamp: {}", last_sync_timestamp);
let last_sync_timestamp_float = last_sync_timestamp as f64;
+ let current_timestamp = snapshot_reference_timestamp.unwrap_or(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs());
+ log_info!(logger, "Current timestamp: {}", current_timestamp);
+
+ let include_reminders = should_snapshot_include_reminders(last_sync_timestamp, current_timestamp, &logger);
+
log_info!(logger, "Obtaining corresponding database entries");
// get all the channel announcements that are currently in the network graph
let announcement_rows = client.query_raw("SELECT announcement_signed, CAST(EXTRACT('epoch' from seen) AS BIGINT) AS seen FROM channel_announcements WHERE short_channel_id = any($1) ORDER BY short_channel_id ASC", [&channel_ids]).await.unwrap();
while let Some(row_res) = pinned_rows.next().await {
let current_announcement_row = row_res.unwrap();
let blob: Vec<u8> = current_announcement_row.get("announcement_signed");
- let mut readable = Cursor::new(blob);
+ let mut readable = Cursor::new(&blob);
let unsigned_announcement = ChannelAnnouncement::read(&mut readable).unwrap().contents;
let scid = unsigned_announcement.short_channel_id;
log_info!(logger, "Fetched {} update rows of the first update in a new direction", newer_oldest_directional_update_count);
}
- {
+ if include_reminders {
// THIS STEP IS USED TO DETERMINE IF A REMINDER UPDATE SHOULD BE SENT
log_info!(logger, "Annotating channel announcements whose latest channel update in a given direction occurred more than six days ago");
// Steps:
// — Obtain all updates, distinct by (scid, direction), ordered by seen DESC
// — From those updates, select distinct by (scid), ordered by seen ASC (to obtain the older one per direction)
- let reminder_threshold_timestamp = SystemTime::now().checked_sub(config::CHANNEL_REMINDER_AGE).unwrap().duration_since(UNIX_EPOCH).unwrap().as_secs() as f64;
-
- let params: [&(dyn tokio_postgres::types::ToSql + Sync); 2] =
- [&channel_ids, &reminder_threshold_timestamp];
- let older_latest_directional_updates = client.query_raw("
- SELECT short_channel_id FROM (
- SELECT DISTINCT ON (short_channel_id) *
- FROM (
- SELECT DISTINCT ON (short_channel_id, direction) short_channel_id, seen
- FROM channel_updates
- WHERE short_channel_id = any($1)
- ORDER BY short_channel_id ASC, direction ASC, seen DESC
- ) AS directional_last_seens
- ORDER BY short_channel_id ASC, seen ASC
- ) AS distinct_chans
- WHERE distinct_chans.seen <= TO_TIMESTAMP($2)
- ", params).await.unwrap();
- let mut pinned_updates = Box::pin(older_latest_directional_updates);
-
+ let reminder_threshold_timestamp = current_timestamp.checked_sub(config::CHANNEL_REMINDER_AGE.as_secs()).unwrap() as f64;
+
+ log_info!(logger, "Fetch first time we saw the current value combination for each direction (prior mutations excepted)");
+ let reminder_lookup_threshold_timestamp = current_timestamp.checked_sub(config::PRUNE_INTERVAL.as_secs()).unwrap() as f64;
+ let params: [&(dyn tokio_postgres::types::ToSql + Sync); 2] = [&channel_ids, &reminder_lookup_threshold_timestamp];
+
+ /*
+ What exactly is the below query doing?
+
+ First, the inner query groups all channel updates by their scid/direction combination,
+ and then sorts those in reverse chronological order by the "seen" column.
+
+ Then, each row is annotated based on whether its subsequent row for the same scid/direction
+ combination has a different value for any one of these six fields:
+ disable, cltv_expiry_delta, htlc_minimum_msat, fee_base_msat, fee_proportional_millionths, htlc_maximum_msat
+ Those are simply the properties we use to keep track of channel mutations.
+
+ The outer query takes all of those results and selects the first value that has a distinct
+ successor for each scid/direction combination. That yields the first instance at which
+ a given channel configuration was received after any prior mutations.
+
+ Knowing that, we can check whether or not there have been any mutations within the
+ reminder requirement window. Because we only care about that window (and potentially the
+ 2-week-window), we pre-filter the scanned updates by only those that were received within
+ 3x the timeframe that we consider necessitates reminders.
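+
+ A worked example (illustrative): three updates for one scid/direction carry
+ fee_base_msat values 50, 100, 100 (oldest to newest, seen at t1 < t2 < t3).
+ Ordered by seen DESC, the t3 row's lead() is the t2 row (equal, so false),
+ the t2 row's lead() is the t1 row (different, so true), and the t1 row has
+ no lead() (COALESCE yields true). The outer DISTINCT ON then keeps the t2
+ row: the first sighting of the currently effective configuration.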
+ */
+
+ let mutated_updates = client.query_raw("
+ SELECT DISTINCT ON (short_channel_id, direction) short_channel_id, direction, blob_signed, CAST(EXTRACT('epoch' from seen) AS BIGINT) AS seen FROM (
+ SELECT short_channel_id, direction, timestamp, seen, blob_signed, COALESCE (
+ disable<>lead(disable) OVER w1
+ OR
+ cltv_expiry_delta<>lead(cltv_expiry_delta) OVER w1
+ OR
+ htlc_minimum_msat<>lead(htlc_minimum_msat) OVER w1
+ OR
+ fee_base_msat<>lead(fee_base_msat) OVER w1
+ OR
+ fee_proportional_millionths<>lead(fee_proportional_millionths) OVER w1
+ OR
+ htlc_maximum_msat<>lead(htlc_maximum_msat) OVER w1,
+ TRUE
+ ) has_distinct_successor
+ FROM channel_updates
+ WHERE short_channel_id = any($1) AND seen >= TO_TIMESTAMP($2)
+ WINDOW w1 AS (PARTITION BY short_channel_id, direction ORDER BY seen DESC)
+ ) _
+ WHERE has_distinct_successor
+ ORDER BY short_channel_id ASC, direction ASC, timestamp DESC
+ ", params).await.unwrap();
+
+ let mut pinned_updates = Box::pin(mutated_updates);
let mut older_latest_directional_update_count = 0;
while let Some(row_res) = pinned_updates.next().await {
let current_row = row_res.unwrap();
- let scid: i64 = current_row.get("short_channel_id");
-
- // annotate this channel as requiring that reminders be sent to the client
- let current_channel_delta = delta_set.entry(scid as u64).or_insert(ChannelDelta::default());
-
- // way might be able to get away with not using this
- (*current_channel_delta).requires_reminder = true;
-
- if let Some(current_channel_info) = network_graph.read_only().channel(scid as u64) {
- if current_channel_info.one_to_two.is_none() || current_channel_info.two_to_one.is_none() {
- // we don't send reminders if we don't have bidirectional update data
+ let seen = current_row.get::<_, i64>("seen") as u32;
+
+ if seen < reminder_threshold_timestamp as u32 {
+ let blob: Vec<u8> = current_row.get("blob_signed");
+ let mut readable = Cursor::new(&blob);
+ let unsigned_channel_update = ChannelUpdate::read(&mut readable).unwrap().contents;
+
+ let scid = unsigned_channel_update.short_channel_id;
+ let direction: bool = current_row.get("direction");
+
+ let current_channel_delta = delta_set.entry(scid).or_insert(ChannelDelta::default());
+
+ // We might be able to get away with not using this
+ (*current_channel_delta).requires_reminder = true;
+ older_latest_directional_update_count += 1;
+
+ if let Some(current_channel_info) = network_graph.read_only().channel(scid) {
+ if current_channel_info.one_to_two.is_none() || current_channel_info.two_to_one.is_none() {
+ // we don't send reminders if we don't have bidirectional update data
+ continue;
+ }
+
+ if let Some(info) = current_channel_info.one_to_two.as_ref() {
+ let flags: u8 = if info.enabled { 0 } else { 2 };
+ let current_update = (*current_channel_delta).updates.0.get_or_insert(DirectedUpdateDelta::default());
+ current_update.serialization_update_flags = Some(flags);
+ }
+
+ if let Some(info) = current_channel_info.two_to_one.as_ref() {
+ let flags: u8 = if info.enabled { 1 } else { 3 };
+ let current_update = (*current_channel_delta).updates.1.get_or_insert(DirectedUpdateDelta::default());
+ current_update.serialization_update_flags = Some(flags);
+ }
+ } else {
+ // we don't send reminders if we don't have the channel
continue;
}
- if let Some(info) = current_channel_info.one_to_two.as_ref() {
- let flags: u8 = if info.enabled { 0 } else { 2 };
- let current_update = (*current_channel_delta).updates.0.get_or_insert(DirectedUpdateDelta::default());
- current_update.serialization_update_flags = Some(flags);
- }
-
- if let Some(info) = current_channel_info.two_to_one.as_ref() {
- let flags: u8 = if info.enabled { 1 } else { 3 };
- let current_update = (*current_channel_delta).updates.1.get_or_insert(DirectedUpdateDelta::default());
- current_update.serialization_update_flags = Some(flags);
- }
- } else {
- // we don't send reminders if we don't have the channel
- continue;
+ log_gossip!(logger, "Reminder requirement triggered by update for channel {} in direction {}", scid, direction);
}
- older_latest_directional_update_count += 1;
}
log_info!(logger, "Fetched {} update rows of the latest update in the less recently updated direction", older_latest_directional_update_count);
}
let direction: bool = current_reference.get("direction");
let seen = current_reference.get::<_, i64>("seen") as u32;
let blob: Vec<u8> = current_reference.get("blob_signed");
- let mut readable = Cursor::new(blob);
+ let mut readable = Cursor::new(&blob);
let unsigned_channel_update = ChannelUpdate::read(&mut readable).unwrap().contents;
let scid = unsigned_channel_update.short_channel_id;
let mut previous_scid = u64::MAX;
let mut previously_seen_directions = (false, false);
- // let mut previously_seen_directions = (false, false);
let mut intermediate_update_count = 0;
while let Some(row_res) = pinned_updates.next().await {
let intermediate_update = row_res.unwrap();
let direction: bool = intermediate_update.get("direction");
let current_seen_timestamp = intermediate_update.get::<_, i64>("seen") as u32;
let blob: Vec<u8> = intermediate_update.get("blob_signed");
- let mut readable = Cursor::new(blob);
+ let mut readable = Cursor::new(&blob);
let unsigned_channel_update = ChannelUpdate::read(&mut readable).unwrap().contents;
let scid = unsigned_channel_update.short_channel_id;
// determine mutations
if let Some(last_seen_update) = update_delta.last_update_before_seen.as_ref() {
- if unsigned_channel_update.flags != last_seen_update.update.flags {
+ if unsigned_channel_update.channel_flags != last_seen_update.update.channel_flags {
update_delta.mutated_properties.flags = true;
}
if unsigned_channel_update.cltv_expiry_delta != last_seen_update.update.cltv_expiry_delta {
log_info!(logger, "Processed intermediate rows ({}) (delta size: {}): {:?}", intermediate_update_count, delta_set.len(), start.elapsed());
}
+pub(super) async fn fetch_node_updates<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, client: &Client, last_sync_timestamp: u32, snapshot_reference_timestamp: Option<u64>, logger: L) -> NodeDeltaSet where L::Target: Logger {
+ let start = Instant::now();
+ let last_sync_timestamp_float = last_sync_timestamp as f64;
+
+ let mut delta_set: NodeDeltaSet = {
+ let read_only_graph = network_graph.read_only();
+ read_only_graph.nodes().unordered_iter().flat_map(|(node_id, node_info)| {
+ let details: NodeDetails = if let Some(details) = node_info.announcement_info.as_ref() {
+ NodeDetails {
+ seen: None,
+ features: details.features().clone(),
+ addresses: details.addresses().into_iter().cloned().collect(),
+ }
+ } else {
+ return None;
+ };
+ Some((node_id.clone(), NodeDelta {
+ latest_details: Some(details),
+ strategy: None,
+ last_details_before_seen: None,
+ }))
+ }).collect()
+ };
+
+ let node_ids: Vec<String> = delta_set.keys().into_iter().map(|id| id.as_slice().to_lower_hex_string()).collect();
+ #[cfg(test)]
+ log_info!(logger, "Node IDs: {:?}", node_ids);
+
+ // get the latest node updates prior to last_sync_timestamp
+ let params: [&(dyn tokio_postgres::types::ToSql + Sync); 2] = [&node_ids, &last_sync_timestamp_float];
+ let reference_rows = client.query_raw("
+ SELECT DISTINCT ON (public_key) public_key, CAST(EXTRACT('epoch' from seen) AS BIGINT) AS seen, announcement_signed
+ FROM node_announcements
+ WHERE
+ public_key = ANY($1) AND
+ seen < TO_TIMESTAMP($2)
+ ORDER BY public_key ASC, seen DESC
+ ", params).await.unwrap();
+ let mut pinned_rows = Box::pin(reference_rows);
+
+ log_info!(logger, "Fetched node announcement reference rows in {:?}", start.elapsed());
+
+ let mut reference_row_count = 0;
+
+ while let Some(row_res) = pinned_rows.next().await {
+ let current_reference = row_res.unwrap();
+
+ let seen = current_reference.get::<_, i64>("seen") as u32;
+ let blob: Vec<u8> = current_reference.get("announcement_signed");
+ let mut readable = Cursor::new(&blob);
+ let unsigned_node_announcement = NodeAnnouncement::read(&mut readable).unwrap().contents;
+ let node_id = unsigned_node_announcement.node_id;
+
+ let current_node_delta = delta_set.entry(node_id).or_insert(NodeDelta::default());
+ (*current_node_delta).last_details_before_seen.get_or_insert_with(|| {
+ let address_set: HashSet<SocketAddress> = unsigned_node_announcement.addresses.into_iter().collect();
+ NodeDetails {
+ seen: Some(seen),
+ features: unsigned_node_announcement.features,
+ addresses: address_set,
+ }
+ });
+ log_gossip!(logger, "Node {} last update before seen: {} (seen at {})", node_id, unsigned_node_announcement.timestamp, seen);
+
+ reference_row_count += 1;
+ }
+
+ log_info!(logger, "Processed {} node announcement reference rows (delta size: {}) in {:?}",
+ reference_row_count, delta_set.len(), start.elapsed());
+
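+ // A mutation must be older than CHANNEL_REMINDER_AGE to warrant a reminder, and
+ // PRUNE_INTERVAL bounds how far back the intermediate-update query needs to look.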
+ let current_timestamp = snapshot_reference_timestamp.unwrap_or(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs());
+ let reminder_inclusion_threshold_timestamp = current_timestamp.checked_sub(config::CHANNEL_REMINDER_AGE.as_secs()).unwrap() as u32;
+ let reminder_lookup_threshold_timestamp = current_timestamp.checked_sub(config::PRUNE_INTERVAL.as_secs()).unwrap() as u32;
+
+ // determine the earliest timestamp from which we need to fetch all relevant updates
+ let include_reminders = should_snapshot_include_reminders(last_sync_timestamp, current_timestamp, &logger);
+ let effective_threshold_timestamp = if include_reminders {
+ // If we include reminders, the decision logic is as follows:
+ // If the pre-sync update was more than 6 days ago, serialize in full.
+ // Otherwise:
+ // If the last mutation occurred after the last sync, serialize the mutated properties.
+ // Otherwise:
+ // If the last mutation occurred more than 6 days ago, serialize as a reminder.
+ // Otherwise, don't serialize at all.
+ std::cmp::min(last_sync_timestamp, reminder_lookup_threshold_timestamp) as f64
+ } else {
+ last_sync_timestamp as f64
+ };
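To make the branching above easier to scan, here is a minimal, self-contained restatement of the decision table from the comment; `NodeAction`, `choose_action`, and the parameter names are illustrative only, not part of this codebase:

```rust
// Illustrative restatement of the node serialization decision table; all names
// here are hypothetical and exist only for this sketch.
#[derive(Debug, PartialEq)]
enum NodeAction { Full, Mutated, Reminder, Skip }

fn choose_action(pre_sync_update_age_days: u64, mutated_since_last_sync: bool, last_mutation_age_days: u64) -> NodeAction {
    if pre_sync_update_age_days > 6 {
        NodeAction::Full // the client may have pruned the node: resend everything
    } else if mutated_since_last_sync {
        NodeAction::Mutated // only the changed properties need to be serialized
    } else if last_mutation_age_days > 6 {
        NodeAction::Reminder // nothing changed, but delay client-side pruning
    } else {
        NodeAction::Skip // recent enough and unchanged: send nothing
    }
}

fn main() {
    assert_eq!(choose_action(2, false, 7), NodeAction::Reminder);
}
```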
+
+ // get all the intermediate node updates
+ // (to calculate the set of mutated fields for snapshotting, where intermediate updates may
+ // have been omitted)
+ let params: [&(dyn tokio_postgres::types::ToSql + Sync); 2] = [&node_ids, &effective_threshold_timestamp];
+ let intermediate_updates = client.query_raw("
+ SELECT announcement_signed, CAST(EXTRACT('epoch' from seen) AS BIGINT) AS seen
+ FROM node_announcements
+ WHERE
+ public_key = ANY($1) AND
+ seen >= TO_TIMESTAMP($2)
+ ORDER BY public_key ASC, timestamp DESC
+ ", params).await.unwrap();
+ let mut pinned_updates = Box::pin(intermediate_updates);
+ log_info!(logger, "Fetched intermediate node announcement rows in {:?}", start.elapsed());
+
+ let mut previous_node_id: Option<NodeId> = None;
+
+ let mut intermediate_update_count = 0;
+ let mut has_address_set_changed = false;
+ let mut has_feature_set_changed = false;
+ let mut latest_mutation_timestamp = None;
+ while let Some(row_res) = pinned_updates.next().await {
+ let intermediate_update = row_res.unwrap();
+ intermediate_update_count += 1;
+
+ let current_seen_timestamp = intermediate_update.get::<_, i64>("seen") as u32;
+ let blob: Vec<u8> = intermediate_update.get("announcement_signed");
+ let mut readable = Cursor::new(&blob);
+ let unsigned_node_announcement = NodeAnnouncement::read(&mut readable).unwrap().contents;
+
+ let node_id = unsigned_node_announcement.node_id;
+
+ // get this node's address set
+ let current_node_delta = delta_set.entry(node_id).or_insert(NodeDelta::default());
+ let address_set: HashSet<SocketAddress> = unsigned_node_announcement.addresses.into_iter().collect();
+
+ if previous_node_id != Some(node_id) {
+ // we're traversing a new node id, initialize the values
+ has_address_set_changed = false;
+ has_feature_set_changed = false;
+ latest_mutation_timestamp = None;
+
+ // this is the highest timestamp value, so set the seen timestamp accordingly
+ current_node_delta.latest_details.as_mut().map(|d| d.seen.replace(current_seen_timestamp));
+ }
+
+ if let Some(last_seen_update) = current_node_delta.last_details_before_seen.as_ref() {
+ { // determine the latest mutation timestamp
+ if address_set != last_seen_update.addresses {
+ has_address_set_changed = true;
+ if latest_mutation_timestamp.is_none() {
+ latest_mutation_timestamp = Some(current_seen_timestamp);
+ }
+ }
+ if unsigned_node_announcement.features != last_seen_update.features {
+ has_feature_set_changed = true;
+ if latest_mutation_timestamp.is_none() {
+ latest_mutation_timestamp = Some(current_seen_timestamp);
+ }
+ }
+ }
+
+ if current_seen_timestamp >= last_sync_timestamp {
+ if has_address_set_changed || has_feature_set_changed {
+ // if the last mutation occurred since the last sync, send the mutation variant
+ current_node_delta.strategy = Some(NodeSerializationStrategy::Mutated(MutatedNodeProperties {
+ addresses: has_address_set_changed,
+ features: has_feature_set_changed,
+ }));
+ }
+ } else if include_reminders && latest_mutation_timestamp.unwrap_or(u32::MAX) <= reminder_inclusion_threshold_timestamp {
+ // only send a reminder if the latest mutation occurred at least 6 days ago
+ current_node_delta.strategy = Some(NodeSerializationStrategy::Reminder);
+ }
+
+ // Note that we completely ignore the case when the last mutation occurred less than
+ // 6 days ago, but prior to the last sync. In that scenario, we send nothing.
+
+ } else {
+ // absent any update that was seen prior to the last sync, send the full version
+ current_node_delta.strategy = Some(NodeSerializationStrategy::Full);
+ }
+
+ previous_node_id = Some(node_id);
+ }
+ log_info!(logger, "Processed intermediate node announcement rows ({}) (delta size: {}): {:?}", intermediate_update_count, delta_set.len(), start.elapsed());
+
+ delta_set
+}
+
pub(super) fn filter_delta_set<L: Deref>(delta_set: &mut DeltaSet, logger: L) where L::Target: Logger {
let original_length = delta_set.len();
let keys: Vec<u64> = delta_set.keys().cloned().collect();
panic!("db init error: {}", initialization_error);
}
- let initialization = client
- .execute(config::db_announcement_table_creation_query(), &[])
- .await;
- if let Err(initialization_error) = initialization {
- panic!("db init error: {}", initialization_error);
- }
+ let table_creation_queries = [
+ config::db_announcement_table_creation_query(),
+ config::db_channel_update_table_creation_query(),
+ config::db_node_announcement_table_creation_query()
+ ];
- let initialization = client
- .execute(
- config::db_channel_update_table_creation_query(),
- &[],
- )
- .await;
- if let Err(initialization_error) = initialization {
- panic!("db init error: {}", initialization_error);
+ for current_table_creation_query in table_creation_queries {
+ let initialization = client
+ .execute(current_table_creation_query, &[])
+ .await;
+ if let Err(initialization_error) = initialization {
+ panic!("db init error: {}", initialization_error);
+ }
}
let initialization = client
let connections_cache_ref = Arc::clone(&connections_cache);
match gossip_message {
+ GossipMessage::NodeAnnouncement(announcement, seen_override) => {
+ let public_key_hex = announcement.contents.node_id.to_string();
+
+ let mut announcement_signed = Vec::new();
+ announcement.write(&mut announcement_signed).unwrap();
+
+ let features = announcement.contents.features.encode();
+ let timestamp = announcement.contents.timestamp as i64;
+
+ let mut serialized_addresses = Vec::new();
+ announcement.contents.addresses.write(&mut serialized_addresses).unwrap();
+
+ let _task = self.tokio_runtime.spawn(async move {
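+ // in tests, an explicit seen timestamp may be supplied to backdate the row deterministically;
+ // the production INSERT omits the seen column so the table default applies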
+ if cfg!(test) && seen_override.is_some() {
+ tokio::time::timeout(POSTGRES_INSERT_TIMEOUT, client
+ .execute("INSERT INTO node_announcements (\
+ public_key, \
+ features, \
+ socket_addresses, \
+ timestamp, \
+ announcement_signed, \
+ seen \
+ ) VALUES ($1, $2, $3, $4, $5, TO_TIMESTAMP($6))", &[
+ &public_key_hex,
+ &features,
+ &serialized_addresses,
+ &timestamp,
+ &announcement_signed,
+ &(seen_override.unwrap() as f64)
+ ])).await.unwrap().unwrap();
+ } else {
+ tokio::time::timeout(POSTGRES_INSERT_TIMEOUT, client
+ .execute("INSERT INTO node_announcements (\
+ public_key, \
+ features, \
+ socket_addresses, \
+ timestamp, \
+ announcement_signed \
+ ) VALUES ($1, $2, $3, $4, $5)", &[
+ &public_key_hex,
+ &features,
+ &serialized_addresses,
+ &timestamp,
+ &announcement_signed,
+ ])).await.unwrap().unwrap();
+ }
+ let mut connections_set = connections_cache_ref.lock().await;
+ connections_set.push(client);
+ limiter_ref.add_permits(1);
+ });
+ #[cfg(test)]
+ tasks_spawned.push(_task);
+ },
GossipMessage::ChannelAnnouncement(announcement, seen_override) => {
let scid = announcement.contents.short_channel_id as i64;
let timestamp = update.contents.timestamp as i64;
- let direction = (update.contents.flags & 1) == 1;
- let disable = (update.contents.flags & 2) > 0;
+ let direction = (update.contents.channel_flags & 1) == 1;
+ let disable = (update.contents.channel_flags & 2) > 0;
let cltv_expiry_delta = update.contents.cltv_expiry_delta as i32;
let htlc_minimum_msat = update.contents.htlc_minimum_msat as i64;
&timestamp,
#[cfg(test)]
&_seen_timestamp,
- &(update.contents.flags as i16),
+ &(update.contents.channel_flags as i16),
&direction,
&disable,
&cltv_expiry_delta,
}
#[cfg(test)]
for task in tasks_spawned {
- task.await;
+ task.await.unwrap();
}
}
use bitcoin::Network;
use bitcoin::blockdata::constants::ChainHash;
+use lightning::ln::features::NodeFeatures;
use lightning::ln::msgs::{UnsignedChannelAnnouncement, UnsignedChannelUpdate};
use lightning::util::ser::{BigSize, Writeable};
use crate::config;
-use crate::lookup::{DeltaSet, DirectedUpdateDelta};
+use crate::lookup::{DeltaSet, DirectedUpdateDelta, NodeDeltaSet};
pub(super) struct SerializationSet {
pub(super) announcements: Vec<UnsignedChannelAnnouncement>,
pub(super) updates: Vec<UpdateSerialization>,
pub(super) full_update_defaults: DefaultUpdateValues,
+ pub(super) node_announcement_feature_defaults: Vec<NodeFeatures>,
+ pub(super) node_mutations: NodeDeltaSet,
pub(super) latest_seen: u32,
pub(super) chain_hash: ChainHash,
}
fn flags(&self) -> u8 {
match self {
UpdateSerialization::Full(latest_update)|
- UpdateSerialization::Incremental(latest_update, _) => latest_update.flags,
+ UpdateSerialization::Incremental(latest_update, _) => latest_update.channel_flags,
UpdateSerialization::Reminder(_, flags) => *flags,
}
}
}
+pub(super) struct MutatedNodeProperties {
+ pub(super) addresses: bool,
+ pub(super) features: bool,
+}
+
+pub(super) enum NodeSerializationStrategy {
+ /// Only serialize the aspects of the node that have been mutated; skip this node entirely
+ /// if nothing has been mutated.
+ Mutated(MutatedNodeProperties),
+ /// Regardless of whether the addresses or features have been mutated, serialize this node in
+ /// full, because it may have been purged from the client.
+ Full,
+ /// This node ID has been seen recently enough to not have been pruned, and this update serves
+ /// solely the purpose of delaying any pruning, without applying any mutations
+ Reminder
+}
+
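As a usage illustration, a serializer can dispatch on this strategy roughly as follows. This is a minimal, self-contained sketch: the two types are redeclared locally, and `describe` is a hypothetical name, not an API of this crate:

```rust
// Minimal sketch: local redeclarations of the two types above, plus a hypothetical
// dispatcher showing what each strategy variant asks the serializer to do.
struct MutatedNodeProperties { addresses: bool, features: bool }

enum NodeSerializationStrategy { Mutated(MutatedNodeProperties), Full, Reminder }

fn describe(strategy: Option<&NodeSerializationStrategy>) -> &'static str {
    match strategy {
        Some(NodeSerializationStrategy::Full) => "write the complete announcement",
        Some(NodeSerializationStrategy::Mutated(p)) if p.addresses && p.features => "write addresses and features",
        Some(NodeSerializationStrategy::Mutated(p)) if p.addresses => "write only addresses",
        Some(NodeSerializationStrategy::Mutated(_)) => "write only features",
        Some(NodeSerializationStrategy::Reminder) => "write a pruning-delay stub",
        None => "skip this node",
    }
}

fn main() {
    println!("{}", describe(Some(&NodeSerializationStrategy::Reminder)));
}
```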
struct FullUpdateValueHistograms {
cltv_expiry_delta: HashMap<u16, usize>,
htlc_minimum_msat: HashMap<u64, usize>,
htlc_maximum_msat: HashMap<u64, usize>,
}
-pub(super) fn serialize_delta_set(delta_set: DeltaSet, last_sync_timestamp: u32) -> SerializationSet {
+pub(super) fn serialize_delta_set(channel_delta_set: DeltaSet, node_delta_set: NodeDeltaSet, last_sync_timestamp: u32) -> SerializationSet {
let mut serialization_set = SerializationSet {
announcements: vec![],
updates: vec![],
full_update_defaults: Default::default(),
+ node_announcement_feature_defaults: vec![],
+ node_mutations: Default::default(),
chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
latest_seen: 0,
};
// if the previous seen update happened more than 6 days ago, the client may have pruned it, and an incremental update wouldn't work
let non_incremental_previous_update_threshold_timestamp = SystemTime::now().checked_sub(config::CHANNEL_REMINDER_AGE).unwrap().duration_since(UNIX_EPOCH).unwrap().as_secs() as u32;
- for (scid, channel_delta) in delta_set.into_iter() {
+ for (scid, channel_delta) in channel_delta_set.into_iter() {
// every announcement's chain hash is the same value, so just set it from the first one
let channel_announcement_delta = channel_delta.announcement.as_ref().unwrap();
// we don't count flags as mutated properties
serialization_set.updates.push(
UpdateSerialization::Incremental(latest_update, mutated_properties));
+ } else if channel_delta.requires_reminder {
+ if let Some(flags) = updates.serialization_update_flags {
+ serialization_set.updates.push(UpdateSerialization::Reminder(scid, flags));
+ }
}
} else {
// serialize the full update
};
serialization_set.full_update_defaults = default_update_values;
+
+ serialization_set.node_mutations = node_delta_set.into_iter().filter_map(|(id, mut delta)| {
+ if delta.strategy.is_none() {
+ return None;
+ }
+ if let Some(last_details_before_seen) = delta.last_details_before_seen.as_ref() {
+ if let Some(last_details_seen) = last_details_before_seen.seen {
+ if last_details_seen <= non_incremental_previous_update_threshold_timestamp {
+ delta.strategy = Some(NodeSerializationStrategy::Full)
+ }
+ }
+ Some((id, delta))
+ } else {
+ None
+ }
+ }).collect();
+
+ let mut node_feature_histogram: HashMap<&NodeFeatures, usize> = Default::default();
+ for (_id, delta) in serialization_set.node_mutations.iter() {
+ // consider either full or feature-mutating serializations for histogram
+ let mut should_add_to_histogram = matches!(delta.strategy, Some(NodeSerializationStrategy::Full));
+ if let Some(NodeSerializationStrategy::Mutated(mutation)) = delta.strategy.as_ref() {
+ should_add_to_histogram = mutation.features;
+ }
+
+ if should_add_to_histogram {
+ if let Some(latest_details) = delta.latest_details.as_ref() {
+ *node_feature_histogram.entry(&latest_details.features).or_insert(0) += 1;
+ };
+ }
+ }
+ serialization_set.node_announcement_feature_defaults = find_leading_histogram_entries(node_feature_histogram, config::NODE_DEFAULT_FEATURE_COUNT as usize);
+
serialization_set
}
// though for htlc maximum msat it could be a u64::max
default
}
+
+pub(super) fn find_leading_histogram_entries(histogram: HashMap<&NodeFeatures, usize>, count: usize) -> Vec<NodeFeatures> {
+ // keep only feature sets that occur more than once; note that the closure's `count` binding
+ // shadows the `count` parameter and refers to the histogram tally, not the result limit
+ let mut entry_counts: Vec<_> = histogram.iter().filter(|&(_, &count)| count > 1).collect();
+ // order by descending frequency and cap the result at `count` (the parameter) entries
+ entry_counts.sort_by(|a, b| b.1.cmp(&a.1));
+ entry_counts.into_iter().take(count).map(|(&features, _count)| features.clone()).collect()
+}
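To see the selection rule concretely, here is a toy version over `u8` keys (standing in for `NodeFeatures`, so the example needs no lightning imports); the behavior mirrors the function above: an entry must occur at least twice, most frequent first, at most `count` results:

```rust
use std::collections::HashMap;

// Same selection rule as find_leading_histogram_entries, restated over u8 keys for brevity:
// keep only entries seen more than once, order by descending frequency, take at most `count`.
fn leading_entries(histogram: HashMap<&u8, usize>, count: usize) -> Vec<u8> {
    let mut entry_counts: Vec<_> = histogram.iter().filter(|&(_, &count)| count > 1).collect();
    entry_counts.sort_by(|a, b| b.1.cmp(&a.1));
    entry_counts.into_iter().take(count).map(|(&k, _)| *k).collect()
}

fn main() {
    let (a, b, c) = (1u8, 2u8, 3u8);
    let histogram = HashMap::from([(&a, 5), (&b, 2), (&c, 1)]);
    assert_eq!(leading_entries(histogram, 10), vec![1, 2]); // c occurs only once and is dropped
}
```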
let relative_symlink_to_snapshot_path = "../snapshots";
// 1. get the current timestamp
- let snapshot_generation_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
- let reference_timestamp = Self::round_down_to_nearest_multiple(snapshot_generation_timestamp, snapshot_interval as u64);
+ let snapshot_generation_time = SystemTime::now();
+ let snapshot_generation_timestamp = snapshot_generation_time.duration_since(UNIX_EPOCH).unwrap().as_secs();
+ let reference_timestamp = Self::round_down_to_nearest_multiple(snapshot_generation_timestamp, snapshot_interval);
log_info!(self.logger, "Capturing snapshots at {} for: {}", snapshot_generation_timestamp, reference_timestamp);
// 2. sleep until the next round interval
// channel updates
// purge and recreate the pending directories
- if fs::metadata(&pending_snapshot_directory).is_ok() {
- fs::remove_dir_all(&pending_snapshot_directory).expect("Failed to remove pending snapshot directory.");
- }
- if fs::metadata(&pending_symlink_directory).is_ok() {
- fs::remove_dir_all(&pending_symlink_directory).expect("Failed to remove pending symlink directory.");
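+ // each suffix pair is (versioned directory suffix, relative prefix that climbs from the
+ // versioned symlink directory back to the root); v1 artifacts live at the root, v2 under /v2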
+ let suffixes = [("", ""), ("/v2", "../")];
+ for (suffix, _) in suffixes {
+ let versioned_snapshot_directory = format!("{}{}", pending_snapshot_directory, suffix);
+ let versioned_symlink_directory = format!("{}{}", pending_symlink_directory, suffix);
+
+ if fs::metadata(&versioned_snapshot_directory).is_ok() {
+ fs::remove_dir_all(&versioned_snapshot_directory).expect("Failed to remove pending snapshot directory.");
+ }
+ if fs::metadata(&versioned_symlink_directory).is_ok() {
+ fs::remove_dir_all(&versioned_symlink_directory).expect("Failed to remove pending symlink directory.");
+ }
+ fs::create_dir_all(&versioned_snapshot_directory).expect("Failed to create pending snapshot directory");
+ fs::create_dir_all(&versioned_symlink_directory).expect("Failed to create pending symlink directory");
}
- fs::create_dir_all(&pending_snapshot_directory).expect("Failed to create pending snapshot directory");
- fs::create_dir_all(&pending_symlink_directory).expect("Failed to create pending symlink directory");
let mut snapshot_sync_timestamps: Vec<(u64, u64)> = Vec::new();
for current_scope in snapshot_scopes {
{
log_info!(self.logger, "Calculating {}-second snapshot", current_scope);
// calculate the snapshot
- let snapshot = super::serialize_delta(network_graph_clone, current_last_sync_timestamp.clone() as u32, self.logger.clone()).await;
+ let delta = super::calculate_delta(network_graph_clone.clone(), current_last_sync_timestamp.clone() as u32, Some(reference_timestamp), self.logger.clone()).await;
+ let snapshot_v1 = super::serialize_delta(&delta, 1, self.logger.clone());
+ let snapshot_v2 = super::serialize_delta(&delta, 2, self.logger.clone());
// persist the snapshot and update the symlink
let snapshot_filename = format!("snapshot__calculated-at:{}__range:{}-scope__previous-sync:{}.lngossip", reference_timestamp, current_scope, current_last_sync_timestamp);
- let snapshot_path = format!("{}/{}", pending_snapshot_directory, snapshot_filename);
- log_info!(self.logger, "Persisting {}-second snapshot: {} ({} messages, {} announcements, {} updates ({} full, {} incremental))", current_scope, snapshot_filename, snapshot.message_count, snapshot.announcement_count, snapshot.update_count, snapshot.update_count_full, snapshot.update_count_incremental);
- fs::write(&snapshot_path, snapshot.data).unwrap();
+ let snapshot_path_v1 = format!("{}/{}", pending_snapshot_directory, snapshot_filename);
+ let snapshot_path_v2 = format!("{}/v2/{}", pending_snapshot_directory, snapshot_filename);
+ log_info!(self.logger, "Persisting {}-second snapshot: {} ({} messages, {} announcements, {} updates ({} full, {} incremental))", current_scope, snapshot_filename, snapshot_v1.message_count, snapshot_v1.channel_announcement_count, snapshot_v1.update_count, snapshot_v1.update_count_full, snapshot_v1.update_count_incremental);
+ fs::write(&snapshot_path_v1, snapshot_v1.data).unwrap();
+ fs::write(&snapshot_path_v2, snapshot_v2.data).unwrap();
snapshot_filenames_by_scope.insert(current_scope.clone(), snapshot_filename);
}
}
};
log_info!(self.logger, "i: {}, referenced scope: {}", i, referenced_scope);
- let snapshot_filename = snapshot_filenames_by_scope.get(&referenced_scope).unwrap();
- let relative_snapshot_path = format!("{}/{}", relative_symlink_to_snapshot_path, snapshot_filename);
+ for (suffix, path_to_root) in suffixes {
+ let snapshot_filename = snapshot_filenames_by_scope.get(&referenced_scope).unwrap();
+ let relative_snapshot_path = format!("{}{}{}/{}", path_to_root, relative_symlink_to_snapshot_path, suffix, snapshot_filename);
- let canonical_last_sync_timestamp = if i == 0 {
- // special-case 0 to always refer to a full/initial sync
- 0
- } else {
- reference_timestamp.saturating_sub(granularity_interval.saturating_mul(i))
- };
- let symlink_path = format!("{}/{}.bin", pending_symlink_directory, canonical_last_sync_timestamp);
+ let canonical_last_sync_timestamp = if i == 0 {
+ // special-case 0 to always refer to a full/initial sync
+ 0
+ } else {
+ reference_timestamp.saturating_sub(granularity_interval.saturating_mul(i))
+ };
+ let symlink_path = format!("{}{}/{}.bin", pending_symlink_directory, suffix, canonical_last_sync_timestamp);
- log_info!(self.logger, "Symlinking: {} -> {} ({} -> {}", i, referenced_scope, symlink_path, relative_snapshot_path);
- symlink(&relative_snapshot_path, &symlink_path).unwrap();
+ log_info!(self.logger, "Symlinking: {} -> {} ({} -> {}", i, referenced_scope, symlink_path, relative_snapshot_path);
+ symlink(&relative_snapshot_path, &symlink_path).unwrap();
+ }
}
let update_time_path = format!("{}/update_time.txt", pending_symlink_directory);
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use hex_conservative::DisplayHex;
-use lightning::ln::features::ChannelFeatures;
-use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
-use lightning::routing::gossip::{NetworkGraph, NodeId};
+use lightning::ln::features::{ChannelFeatures, NodeFeatures};
+use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, SocketAddress, UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement};
+use lightning::routing::gossip::{NetworkGraph, NodeAlias, NodeId};
use lightning::util::ser::Writeable;
use lightning_rapid_gossip_sync::RapidGossipSync;
-use crate::{config, serialize_delta};
+use crate::{calculate_delta, config, serialize_delta};
use crate::persistence::GossipPersister;
use crate::snapshot::Snapshotter;
use crate::types::{GossipMessage, tests::TestLogger};
})
}
-fn generate_announcement(short_channel_id: u64) -> ChannelAnnouncement {
+fn generate_node_announcement(private_key: Option<SecretKey>) -> NodeAnnouncement {
+ let secp_context = Secp256k1::new();
+
+ let random_private_key = private_key.unwrap_or(SecretKey::from_slice(&[1; 32]).unwrap());
+ let random_public_key = random_private_key.public_key(&secp_context);
+ let node_id = NodeId::from_pubkey(&random_public_key);
+
+ let announcement = UnsignedNodeAnnouncement {
+ features: NodeFeatures::empty(),
+ timestamp: 0,
+ node_id,
+ rgb: [0, 128, 255],
+ alias: NodeAlias([0; 32]),
+ addresses: vec![],
+ excess_data: vec![],
+ excess_address_data: vec![],
+ };
+
+ let msg_hash = bitcoin::secp256k1::Message::from_slice(&Sha256dHash::hash(&announcement.encode()[..])[..]).unwrap();
+ let signature = secp_context.sign_ecdsa(&msg_hash, &random_private_key);
+
+ NodeAnnouncement {
+ signature,
+ contents: announcement,
+ }
+}
+
+fn generate_channel_announcement(short_channel_id: u64) -> ChannelAnnouncement {
let secp_context = Secp256k1::new();
let random_private_key_1 = SecretKey::from_slice(&[1; 32]).unwrap();
chain_hash: genesis_hash(),
short_channel_id: scid,
timestamp,
- flags: 0 | flag_mask,
+ message_flags: 0,
+ channel_flags: flag_mask,
cltv_expiry_delta: expiry_delta,
htlc_minimum_msat: min_msat,
htlc_maximum_msat: max_msat,
IS_TEST_SCHEMA_CLEAN.with(|cleanliness_reference| {
let is_clean_option = cleanliness_reference.borrow();
if let Some(is_clean) = *is_clean_option {
+ if std::thread::panicking() {
+ return;
+ }
assert_eq!(is_clean, true);
}
});
println!("timestamp: {}", timestamp);
{ // seed the db
- let announcement = generate_announcement(short_channel_id);
+ let announcement = generate_channel_announcement(short_channel_id);
let update_1 = generate_update(short_channel_id, false, timestamp, 0, 0, 0, 5, 0);
let update_2 = generate_update(short_channel_id, true, timestamp, 0, 0, 0, 10, 0);
persister.persist_gossip().await;
}
- let serialization = serialize_delta(network_graph_arc.clone(), 0, logger.clone()).await;
+ let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 1, logger.clone());
logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
clean_test_db().await;
assert_eq!(channel_count, 1);
assert_eq!(serialization.message_count, 3);
- assert_eq!(serialization.announcement_count, 1);
+ assert_eq!(serialization.channel_announcement_count, 1);
assert_eq!(serialization.update_count, 2);
let client_graph = NetworkGraph::new(Network::Bitcoin, logger.clone());
}).await.unwrap();
}
+#[tokio::test]
+async fn test_node_announcement_persistence() {
+ let _sanitizer = SchemaSanitizer::new();
+ let logger = Arc::new(TestLogger::new());
+ let network_graph = NetworkGraph::new(Network::Bitcoin, logger.clone());
+ let network_graph_arc = Arc::new(network_graph);
+ let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
+
+ { // seed the db
+ let mut announcement = generate_node_announcement(None);
+ receiver.send(GossipMessage::NodeAnnouncement(announcement.clone(), None)).await.unwrap();
+ receiver.send(GossipMessage::NodeAnnouncement(announcement.clone(), Some(12345))).await.unwrap();
+
+ {
+ // modify announcement to contain a bunch of addresses
+ announcement.contents.addresses.push(SocketAddress::Hostname {
+ hostname: "google.com".to_string().try_into().unwrap(),
+ port: 443,
+ });
+ announcement.contents.addresses.push(SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 9635 });
+ announcement.contents.addresses.push(SocketAddress::TcpIpV6 { addr: [1; 16], port: 1337 });
+ announcement.contents.addresses.push(SocketAddress::OnionV2([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]));
+ announcement.contents.addresses.push(SocketAddress::OnionV3 {
+ ed25519_pubkey: [1; 32],
+ checksum: 2,
+ version: 3,
+ port: 4,
+ });
+ }
+ receiver.send(GossipMessage::NodeAnnouncement(announcement, Some(12345))).await.unwrap();
+
+ drop(receiver);
+ persister.persist_gossip().await;
+
+ tokio::task::spawn_blocking(move || {
+ drop(persister);
+ }).await.unwrap();
+ }
+ clean_test_db().await;
+}
+
+#[tokio::test]
+async fn test_node_announcement_delta_detection() {
+ let _sanitizer = SchemaSanitizer::new();
+ let logger = Arc::new(TestLogger::new());
+ let network_graph = NetworkGraph::new(Network::Bitcoin, logger.clone());
+ let network_graph_arc = Arc::new(network_graph);
+ let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
+
+ let timestamp = current_time() - 10;
+
+ { // seed the db
+
+ { // necessary for the node announcements to be considered relevant
+ let announcement = generate_channel_announcement(1);
+ let update_1 = generate_update(1, false, timestamp, 0, 0, 0, 6, 0);
+ let update_2 = generate_update(1, true, timestamp, 0, 0, 0, 6, 0);
+
+ network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
+ network_graph_arc.update_channel_unsigned(&update_1.contents).unwrap();
+ network_graph_arc.update_channel_unsigned(&update_2.contents).unwrap();
+
+ receiver.send(GossipMessage::ChannelAnnouncement(announcement, Some(timestamp))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_1, Some(timestamp))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_2, Some(timestamp))).await.unwrap();
+ }
+
+ let mut announcement = generate_node_announcement(None);
+ announcement.contents.timestamp = timestamp - 10;
+ network_graph_arc.update_node_from_unsigned_announcement(&announcement.contents).unwrap();
+ receiver.send(GossipMessage::NodeAnnouncement(announcement.clone(), Some(announcement.contents.timestamp))).await.unwrap();
+ announcement.contents.timestamp = timestamp - 8;
+ network_graph_arc.update_node_from_unsigned_announcement(&announcement.contents).unwrap();
+ receiver.send(GossipMessage::NodeAnnouncement(announcement.clone(), Some(announcement.contents.timestamp))).await.unwrap();
+
+ {
+ let mut current_announcement = generate_node_announcement(Some(SecretKey::from_slice(&[2; 32]).unwrap()));
+ current_announcement.contents.features = NodeFeatures::from_be_bytes(vec![23, 48]);
+ current_announcement.contents.timestamp = timestamp;
+ network_graph_arc.update_node_from_unsigned_announcement(&current_announcement.contents).unwrap();
+ receiver.send(GossipMessage::NodeAnnouncement(current_announcement, Some(timestamp))).await.unwrap();
+ }
+
+ {
+ let mut current_announcement = generate_node_announcement(Some(SecretKey::from_slice(&[3; 32]).unwrap()));
+ current_announcement.contents.features = NodeFeatures::from_be_bytes(vec![22, 49]);
+ current_announcement.contents.timestamp = timestamp;
+ receiver.send(GossipMessage::NodeAnnouncement(current_announcement, Some(timestamp))).await.unwrap();
+ }
+
+ {
+ // modify announcement to contain a bunch of addresses
+ announcement.contents.addresses.push(SocketAddress::Hostname {
+ hostname: "google.com".to_string().try_into().unwrap(),
+ port: 443,
+ });
+ announcement.contents.features = NodeFeatures::from_be_bytes(vec![23, 48]);
+ announcement.contents.addresses.push(SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 9635 });
+ announcement.contents.addresses.push(SocketAddress::TcpIpV6 { addr: [1; 16], port: 1337 });
+ announcement.contents.addresses.push(SocketAddress::OnionV2([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]));
+ announcement.contents.addresses.push(SocketAddress::OnionV3 {
+ ed25519_pubkey: [1; 32],
+ checksum: 2,
+ version: 3,
+ port: 4,
+ });
+ announcement.contents.timestamp = timestamp;
+ }
+ network_graph_arc.update_node_from_unsigned_announcement(&announcement.contents).unwrap();
+ receiver.send(GossipMessage::NodeAnnouncement(announcement, Some(timestamp))).await.unwrap();
+
+ drop(receiver);
+ persister.persist_gossip().await;
+
+ tokio::task::spawn_blocking(move || {
+ drop(persister);
+ }).await.unwrap();
+ }
+
+ let delta = calculate_delta(network_graph_arc.clone(), timestamp - 5, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 2, logger.clone());
+ clean_test_db().await;
+
+ assert_eq!(serialization.message_count, 3);
+ assert_eq!(serialization.node_announcement_count, 2);
+ assert_eq!(serialization.node_update_count, 1);
+ assert_eq!(serialization.node_feature_update_count, 1);
+ assert_eq!(serialization.node_address_update_count, 1);
+}
+
/// If a channel has only seen updates in one direction, it should not be announced
#[tokio::test]
async fn test_unidirectional_intermediate_update_consideration() {
println!("timestamp: {}", timestamp);
{ // seed the db
- let announcement = generate_announcement(short_channel_id);
+ let announcement = generate_channel_announcement(short_channel_id);
let update_1 = generate_update(short_channel_id, false, timestamp, 0, 0, 0, 6, 0);
let update_2 = generate_update(short_channel_id, true, timestamp + 1, 0, 0, 0, 3, 0);
let update_3 = generate_update(short_channel_id, true, timestamp + 2, 0, 0, 0, 4, 0);
let client_graph_arc = Arc::new(client_graph);
let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
- let serialization = serialize_delta(network_graph_arc.clone(), timestamp + 1, logger.clone()).await;
+ let delta = calculate_delta(network_graph_arc.clone(), timestamp + 1, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 1, logger.clone());
logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Fetched 1 update rows of the first update in a new direction", 1);
logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed 1 reference rows", 1);
logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed intermediate rows (2)", 1);
assert_eq!(serialization.message_count, 3);
- assert_eq!(serialization.announcement_count, 1);
+ assert_eq!(serialization.channel_announcement_count, 1);
assert_eq!(serialization.update_count, 2);
assert_eq!(serialization.update_count_full, 2);
assert_eq!(serialization.update_count_incremental, 0);
println!("timestamp: {}", timestamp);
{ // seed the db
- let announcement = generate_announcement(short_channel_id);
+ let announcement = generate_channel_announcement(short_channel_id);
let update_1 = generate_update(short_channel_id, false, timestamp, 0, 0, 0, 5, 0);
let update_2 = generate_update(short_channel_id, false, timestamp + 1, 0, 0, 0, 4, 0);
let update_3 = generate_update(short_channel_id, false, timestamp + 2, 0, 0, 0, 3, 0);
let channel_count = network_graph_arc.read_only().channels().len();
assert_eq!(channel_count, 1);
- let serialization = serialize_delta(network_graph_arc.clone(), timestamp + 1, logger.clone()).await;
+ let delta = calculate_delta(network_graph_arc.clone(), timestamp + 1, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 1, logger.clone());
logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Fetched 0 update rows of the first update in a new direction", 1);
logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed 2 reference rows", 1);
logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed intermediate rows (2)", 1);
assert_eq!(serialization.message_count, 1);
- assert_eq!(serialization.announcement_count, 0);
+ assert_eq!(serialization.channel_announcement_count, 0);
assert_eq!(serialization.update_count, 1);
assert_eq!(serialization.update_count_full, 0);
assert_eq!(serialization.update_count_incremental, 1);
clean_test_db().await;
}
+#[tokio::test]
+async fn test_channel_reminders() {
+ let _sanitizer = SchemaSanitizer::new();
+
+ let logger = Arc::new(TestLogger::new());
+ let network_graph = NetworkGraph::new(Network::Bitcoin, logger.clone());
+ let network_graph_arc = Arc::new(network_graph);
+ let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
+
+ let timestamp = current_time();
+ println!("timestamp: {}", timestamp);
+ let channel_reminder_delta = config::CHANNEL_REMINDER_AGE.as_secs() as u32;
+
+ { // seed the db
+ { // unupdated channel
+ let short_channel_id = 1;
+ let announcement = generate_channel_announcement(short_channel_id);
+ let update_1 = generate_update(short_channel_id, false, timestamp - channel_reminder_delta - 1, 0, 0, 0, 5, 0);
+ let update_2 = generate_update(short_channel_id, true, timestamp - channel_reminder_delta - 1, 0, 0, 0, 3, 0);
+
+ network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
+ network_graph_arc.update_channel_unsigned(&update_1.contents).unwrap();
+ network_graph_arc.update_channel_unsigned(&update_2.contents).unwrap();
+
+ receiver.send(GossipMessage::ChannelAnnouncement(announcement, Some(timestamp - channel_reminder_delta - 1))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_1, Some(timestamp - channel_reminder_delta - 1))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_2, Some(timestamp - channel_reminder_delta - 1))).await.unwrap();
+ }
+ { // unmodified but updated channel
+ let short_channel_id = 2;
+ let announcement = generate_channel_announcement(short_channel_id);
+ let update_1 = generate_update(short_channel_id, false, timestamp - channel_reminder_delta - 10, 0, 0, 0, 5, 0);
+ // in the false direction, we have one update that's different prior
+ let update_2 = generate_update(short_channel_id, false, timestamp - channel_reminder_delta - 5, 0, 1, 0, 5, 0);
+ let update_3 = generate_update(short_channel_id, false, timestamp - channel_reminder_delta - 1, 0, 0, 0, 5, 0);
+ let update_4 = generate_update(short_channel_id, true, timestamp - channel_reminder_delta - 1, 0, 0, 0, 3, 0);
+ let update_5 = generate_update(short_channel_id, false, timestamp - channel_reminder_delta + 10, 0, 0, 0, 5, 0);
+ let update_6 = generate_update(short_channel_id, true, timestamp - channel_reminder_delta + 10, 0, 0, 0, 3, 0);
+ let update_7 = generate_update(short_channel_id, false, timestamp - channel_reminder_delta + 20, 0, 0, 0, 5, 0);
+ let update_8 = generate_update(short_channel_id, true, timestamp - channel_reminder_delta + 20, 0, 0, 0, 3, 0);
+
+ network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
+ network_graph_arc.update_channel_unsigned(&update_7.contents).unwrap();
+ network_graph_arc.update_channel_unsigned(&update_8.contents).unwrap();
+
+ receiver.send(GossipMessage::ChannelAnnouncement(announcement, Some(timestamp - channel_reminder_delta - 1))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_1, Some(timestamp - channel_reminder_delta - 10))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_2, Some(timestamp - channel_reminder_delta - 5))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_3, Some(timestamp - channel_reminder_delta - 1))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_4, Some(timestamp - channel_reminder_delta - 1))).await.unwrap();
+
+ receiver.send(GossipMessage::ChannelUpdate(update_5, Some(timestamp - channel_reminder_delta + 10))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_6, Some(timestamp - channel_reminder_delta + 10))).await.unwrap();
+
+ receiver.send(GossipMessage::ChannelUpdate(update_7, Some(timestamp - channel_reminder_delta + 20))).await.unwrap();
+ receiver.send(GossipMessage::ChannelUpdate(update_8, Some(timestamp - channel_reminder_delta + 20))).await.unwrap();
+ }
+ drop(receiver);
+ persister.persist_gossip().await;
+ }
+
+ let channel_count = network_graph_arc.read_only().channels().len();
+ assert_eq!(channel_count, 2);
+
+ let delta = calculate_delta(network_graph_arc.clone(), timestamp - channel_reminder_delta + 15, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 1, logger.clone());
+
+ logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Fetched 0 update rows of the first update in a new direction", 1);
+ logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Fetched 4 update rows of the latest update in the less recently updated direction", 1);
+ logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed 2 reference rows", 1);
+ logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed intermediate rows (2)", 1);
+
+ assert_eq!(serialization.message_count, 4);
+ assert_eq!(serialization.channel_announcement_count, 0);
+ assert_eq!(serialization.update_count, 4);
+ assert_eq!(serialization.update_count_full, 0);
+ assert_eq!(serialization.update_count_incremental, 4);
+
+ tokio::task::spawn_blocking(move || {
+ drop(persister);
+ }).await.unwrap();
+
+ clean_test_db().await;
+}
+
#[tokio::test]
async fn test_full_snapshot_recency() {
let _sanitizer = SchemaSanitizer::new();
{ // seed the db
let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
- let announcement = generate_announcement(short_channel_id);
+ let announcement = generate_channel_announcement(short_channel_id);
network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
let client_graph_arc = Arc::new(client_graph);
{ // sync after initial seed
- let serialization = serialize_delta(network_graph_arc.clone(), 0, logger.clone()).await;
+ let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 1, logger.clone());
logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
let channel_count = network_graph_arc.read_only().channels().len();
assert_eq!(channel_count, 1);
assert_eq!(serialization.message_count, 3);
- assert_eq!(serialization.announcement_count, 1);
+ assert_eq!(serialization.channel_announcement_count, 1);
assert_eq!(serialization.update_count, 2);
let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
{ // seed the db
let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
- let announcement = generate_announcement(short_channel_id);
+ let announcement = generate_channel_announcement(short_channel_id);
network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
let client_graph_arc = Arc::new(client_graph);
{ // sync after initial seed
- let serialization = serialize_delta(network_graph_arc.clone(), 0, logger.clone()).await;
+ let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 1, logger.clone());
logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
let channel_count = network_graph_arc.read_only().channels().len();
assert_eq!(channel_count, 1);
assert_eq!(serialization.message_count, 3);
- assert_eq!(serialization.announcement_count, 1);
+ assert_eq!(serialization.channel_announcement_count, 1);
assert_eq!(serialization.update_count, 2);
let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
{ // seed the db
let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
- let announcement = generate_announcement(short_channel_id);
+ let announcement = generate_channel_announcement(short_channel_id);
network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
let client_graph_arc = Arc::new(client_graph);
{ // sync after initial seed
- let serialization = serialize_delta(network_graph_arc.clone(), 0, logger.clone()).await;
+ let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 1, logger.clone());
logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
let channel_count = network_graph_arc.read_only().channels().len();
assert_eq!(channel_count, 1);
assert_eq!(serialization.message_count, 3);
- assert_eq!(serialization.announcement_count, 1);
+ assert_eq!(serialization.channel_announcement_count, 1);
assert_eq!(serialization.update_count, 2);
let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
{ // seed the db
let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
- let announcement = generate_announcement(short_channel_id);
+ let announcement = generate_channel_announcement(short_channel_id);
network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
let client_graph_arc = Arc::new(client_graph);
{ // sync after initial seed
- let serialization = serialize_delta(network_graph_arc.clone(), 0, logger.clone()).await;
+ let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 1, logger.clone());
logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
let channel_count = network_graph_arc.read_only().channels().len();
assert_eq!(channel_count, 1);
assert_eq!(serialization.message_count, 3);
- assert_eq!(serialization.announcement_count, 1);
+ assert_eq!(serialization.channel_announcement_count, 1);
assert_eq!(serialization.update_count, 2);
let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
let secondary_channel_id = main_channel_id + 1;
{ // main channel
- let announcement = generate_announcement(main_channel_id);
+ let announcement = generate_channel_announcement(main_channel_id);
network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
}
{ // secondary channel
- let announcement = generate_announcement(secondary_channel_id);
+ let announcement = generate_channel_announcement(secondary_channel_id);
network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
}
let client_graph_arc = Arc::new(client_graph);
{ // sync after initial seed
- let serialization = serialize_delta(network_graph_arc.clone(), 0, logger.clone()).await;
+ let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+ let serialization = serialize_delta(&delta, 1, logger.clone());
logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 2", 1);
let channel_count = network_graph_arc.read_only().channels().len();
assert_eq!(channel_count, 2);
assert_eq!(serialization.message_count, 6);
- assert_eq!(serialization.announcement_count, 2);
+ assert_eq!(serialization.channel_announcement_count, 2);
assert_eq!(serialization.update_count, 4);
let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
{ // seed the db
let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
- let announcement = generate_announcement(short_channel_id);
+ let announcement = generate_channel_announcement(short_channel_id);
network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
use std::sync::Arc;
use lightning::sign::KeysManager;
-use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate};
+use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement};
use lightning::ln::peer_handler::{ErroringMessageHandler, IgnoringMessageHandler, PeerManager};
use lightning::util::logger::{Logger, Record};
use crate::config;
#[derive(Debug)]
pub(crate) enum GossipMessage {
+ NodeAnnouncement(NodeAnnouncement, Option<u32>),
// the second element is an optional override for the seen value
ChannelAnnouncement(ChannelAnnouncement, Option<u32>),
ChannelUpdate(ChannelUpdate, Option<u32>),
}
let mut transaction = block.txdata.swap_remove(transaction_index as usize);
if output_index as usize >= transaction.output.len() {
- log_error!(logger, "Could't find output {} in transaction {}", output_index, transaction.txid());
+ log_error!(logger, "Could't find output {} in transaction {}", output_index, transaction.compute_txid());
return Err(UtxoLookupError::UnknownTx);
}
Ok(transaction.output.swap_remove(output_index as usize))