Rewrite per-source ratelimiting rules to use a custom hashtable
author    Matt Corallo <git@bluematt.me>
          Thu, 10 Jun 2021 03:43:02 +0000 (03:43 +0000)
committer Matt Corallo <git@bluematt.me>
          Thu, 10 Jun 2021 19:23:54 +0000 (19:23 +0000)
The in-kernel hashtable isn't at all fancy, and a custom one gets us
basically the same behavior, without the overhead of the per-CPU maps
we previously had to use to get sensible multicore access semantics.

genrules.py
install.sh
xdp.c

diff --git a/genrules.py b/genrules.py
index a8ad71a2a578396ba26d7e9761b16f97d3e4ae28..a09fde157e9eed0988e54a385a79cbba6721ac15 100755
--- a/genrules.py
+++ b/genrules.py
@@ -412,21 +412,24 @@ with open("rules.h", "w") as out:
                             first_action += "struct ratelimit *rate = bpf_map_lookup_elem(&rate_map, &ratelimitidx);\n"
                             ratelimitcnt += 1
                         else:
-                            spin_lock = "/* No locking as we're per-CPU */"
-                            spin_unlock = "/* No locking as we're per-CPU */"
+                            spin_lock = "/* No locking as we're locked in get_v*_persrc_ratelimit */"
+                            spin_unlock = "bpf_spin_unlock(rate_ptr.lock);"
                             if proto == 4:
                                 if mid_byte > 32:
                                     continue
                                 first_action += f"const uint32_t srcip = ip->saddr & MASK4({mid_byte});\n"
                                 first_action += f"void *rate_map = &v4_src_rate_{len(v4persrcratelimits)};\n"
+                                first_action += f"struct persrc_rate4_ptr rate_ptr = get_v4_persrc_ratelimit(srcip, rate_map, {(high_byte + 1) * 1024});\n"
+                                first_action += f"struct persrc_rate4_entry *rate = rate_ptr.rate;\n"
                                 v4persrcratelimits.append((high_byte + 1) * 1024)
                             else:
                                 if mid_byte > 128:
                                     continue
                                 first_action += f"const uint128_t srcip = ip6->saddr & MASK6({mid_byte});\n"
                                 first_action += f"void *rate_map = &v6_src_rate_{len(v6persrcratelimits)};\n"
+                                first_action += f"struct persrc_rate6_ptr rate_ptr = get_v6_persrc_ratelimit(srcip, rate_map, {(high_byte + 1) * 1024});\n"
+                                first_action += f"struct persrc_rate6_entry *rate = rate_ptr.rate;\n"
                                 v6persrcratelimits.append((high_byte + 1) * 1024)
-                            first_action += f"struct percpu_ratelimit *rate = bpf_map_lookup_elem(rate_map, &srcip);\n"
                         first_action +=  "if (rate) {\n"
                         first_action += f"\t{spin_lock}\n"
                         first_action +=  "\tif (likely(rate->sent_rate > 0))" + " {\n"
@@ -449,15 +452,6 @@ with open("rules.h", "w") as out:
                         first_action +=  "\t\t{stats_replace}\n"
                         first_action +=  "\t\treturn XDP_DROP;\n"
                         first_action +=  "\t}\n"
-                        if ty == "0x8306" or ty == "0x830c":
-                            first_action +=  "} else {\n"
-                            first_action +=  "\tstruct percpu_ratelimit new_rate = { .sent_time = time, };\n"
-                            first_action +=  "\trate = &new_rate;\n"
-                            if ty == "0x8006" or ty == "0x8306":
-                                first_action += f"\t\trate->sent_rate = data_end - pktdata;\n"
-                            else:
-                                first_action += f"\t\trate->sent_rate = 1;\n"
-                            first_action +=  "\tbpf_map_update_elem(rate_map, &srcip, rate, BPF_ANY);\n"
                         first_action +=  "}\n"
                 elif ty == "0x8007":
                     if low_bytes & 1 == 0:
@@ -509,6 +503,6 @@ with open("rules.h", "w") as out:
             out.write("#define PARSE_V6_FRAG PARSE\n")
     with open("maps.h", "w") as out:
         for idx, limit in enumerate(v4persrcratelimits):
-            out.write(f"V4_SRC_RATE_DEFINE({idx}, {limit})\n")
+            out.write(f"SRC_RATE_DEFINE(4, {idx}, {limit})\n")
         for idx, limit in enumerate(v6persrcratelimits):
-            out.write(f"V6_SRC_RATE_DEFINE({idx}, {limit})\n")
+            out.write(f"SRC_RATE_DEFINE(6, {idx}, {limit})\n")
diff --git a/install.sh b/install.sh
index d77a8b0a66a5dd941fa78fdb39e14488da44e2cd..b8f83ab1e85823bfdfaadb0dc2ae47c3c6d622ab 100755
--- a/install.sh
+++ b/install.sh
@@ -15,10 +15,12 @@ fi
 RULES="$(birdc show route table flowspec4 primary all)
 $(birdc show route table flowspec6 primary all)"
 
+echo "const uint8_t COMPILE_TIME_RAND[] = { $(dd if=/dev/urandom of=/dev/stdout bs=1 count=8 2>/dev/null | hexdump -e '4/1 "0x%02x, "') };" > rand.h
+
 echo "$RULES" | ./genrules.py --8021q=drop-vlan --v6frag=ignore-parse-if-rule --ihl=parse-options
-clang $CLANG_ARGS -g -std=c99 -pedantic -Wall -Wextra -Wno-pointer-arith -Wno-unused-variable -O3 -emit-llvm -c xdp.c -o xdp.bc
+clang $CLANG_ARGS -g -std=c99 -pedantic -Wall -Wextra -Wno-pointer-arith -Wno-unused-variable -Wno-unused-function -O3 -emit-llvm -c xdp.c -o xdp.bc
 if [ "$2" != "" ]; then
-       clang $4 -g -std=c99 -pedantic -Wall -Wextra -Wno-pointer-arith -Wno-unused-variable -O3 -emit-llvm -c "$2" -o wrapper.bc
+       clang $4 -g -std=c99 -pedantic -Wall -Wextra -Wno-pointer-arith -O3 -emit-llvm -c "$2" -o wrapper.bc
        llvm-link xdp.bc wrapper.bc | llc -O3 -march=bpf -filetype=obj -o xdp
 else
        cat xdp.bc | llc -O3 -march=bpf -filetype=obj -o xdp
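
The generated rand.h simply bakes a compile-time random seed into the program for the siphash used by the new per-source hashtable; with illustrative bytes it looks like:

const uint8_t COMPILE_TIME_RAND[] = { 0x41, 0x09, 0xd6, 0x66, 0x2e, 0xf0, 0x8c, 0x1b, };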
diff --git a/xdp.c b/xdp.c
index 1344acd1aac2558494ad47b58b0ed89caca74f07..6c9a01ace9dfb81d7ffd07eebc596503217bd7fd 100644
--- a/xdp.c
+++ b/xdp.c
@@ -186,32 +186,89 @@ struct {
 } rate_map SEC(".maps");
 #endif /* RATE_CNT */
 
-// For per-source rate limiting, we have to use per-CPU hash maps as Linux
-// doesn't support spinlocks inside of a LRU_HASH (see if block in
-// map_check_btf as of Linux 5.10).
-// This isn't exactly accurate, but at least its faster.
-struct percpu_ratelimit {
-       int64_t sent_rate;
-       int64_t sent_time;
-};
+// We implement a rather naive hashtable here instead of using a BPF map because
+// (a) the BPF map hashtables are similarly naive (no rehashing, etc),
+// (b) the BPF map LRU hashtables don't support locking.
+//
+// We first separate into a few top-level buckets with per-bucket locks, limiting
+// us to SRC_HASH_MAX_PARALLELISM (i.e. 2**SRC_HASH_MAX_PARALLELISM_POW) parallel accessors.
+//
+// Then we build an array of MAX_ENTRIES/2**SRC_HASH_MAX_PARALLELISM_POW entries,
+// which are split into buckets of size SRC_HASH_BUCKET_COUNT. An entry can appear
+// in any of the SRC_HASH_BUCKET_COUNT slots within the bucket at its hash value.
+#define SRC_HASH_MAX_PARALLELISM_POW 7
+#define SRC_HASH_MAX_PARALLELISM (1 << SRC_HASH_MAX_PARALLELISM_POW)
+#define SRC_HASH_BUCKET_COUNT_POW 3
+#define SRC_HASH_BUCKET_COUNT (1 << SRC_HASH_BUCKET_COUNT_POW)
+
+#include "rand.h"
+
+#define CREATE_PERSRC_LOOKUP(IPV, IP_TYPE) \
+struct persrc_rate##IPV##_entry { \
+       int64_t sent_rate; \
+       int64_t sent_time; \
+       IP_TYPE srcip; \
+}; \
+ \
+struct persrc_rate##IPV##_bucket { \
+       struct bpf_spin_lock lock; \
+       struct persrc_rate##IPV##_entry entries[]; \
+}; \
+ \
+struct persrc_rate##IPV##_ptr { \
+       struct persrc_rate##IPV##_entry *rate; \
+       struct bpf_spin_lock *lock; \
+}; \
+ \
+__attribute__((always_inline)) \
+static inline struct persrc_rate##IPV##_ptr get_v##IPV##_persrc_ratelimit(IP_TYPE key, void *map, size_t map_limit) { \
+       struct persrc_rate##IPV##_ptr res = { .rate = NULL, .lock = NULL }; \
+       uint64_t hash = siphash(&key, sizeof(key), COMPILE_TIME_RAND); \
+ \
+       const uint32_t map_key = hash % SRC_HASH_MAX_PARALLELISM; \
+       struct persrc_rate##IPV##_bucket *buckets = bpf_map_lookup_elem(map, &map_key); \
+       if (!buckets) return res; \
+ \
+       hash >>= SRC_HASH_MAX_PARALLELISM_POW; \
+       map_limit >>= SRC_HASH_MAX_PARALLELISM_POW; \
+ \
+       struct persrc_rate##IPV##_entry *first_bucket = &buckets->entries[(hash % map_limit) & (~(SRC_HASH_BUCKET_COUNT - 1))]; \
+       bpf_spin_lock(&buckets->lock); \
+ \
+       int min_sent_idx = 0; \
+       int64_t min_sent_time = INT64_MAX; \
+       for (int i = 0; i < SRC_HASH_BUCKET_COUNT; i++) { \
+               if (first_bucket[i].srcip == key) { \
+                       res.rate = &first_bucket[i]; \
+                       res.lock = &buckets->lock; \
+                       return res; \
+               } else if (min_sent_time > first_bucket[i].sent_time) { \
+                       min_sent_time = first_bucket[i].sent_time; \
+                       min_sent_idx = i; \
+               } \
+       } \
+       res.rate = &first_bucket[min_sent_idx]; \
+       res.rate->srcip = key; \
+       res.rate->sent_rate = 0; \
+       res.rate->sent_time = 0; \
+       res.lock = &buckets->lock; \
+       return res; \
+}
 
-#define V6_SRC_RATE_DEFINE(n, limit) \
-struct { \
-       __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \
-       __uint(map_flags, BPF_F_NO_COMMON_LRU); \
-       __uint(max_entries, limit); \
-       uint128_t *key; \
-       struct percpu_ratelimit *value; \
-} v6_src_rate_##n SEC(".maps");
-
-#define V4_SRC_RATE_DEFINE(n, limit) \
+CREATE_PERSRC_LOOKUP(6, uint128_t)
+CREATE_PERSRC_LOOKUP(4, uint32_t)
+
+#define SRC_RATE_DEFINE(IPV, n, limit) \
+struct persrc_rate##IPV##_bucket_##n { \
+       struct bpf_spin_lock lock; \
+       struct persrc_rate##IPV##_entry entries[limit / SRC_HASH_MAX_PARALLELISM]; \
+}; \
 struct { \
-       __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \
-       __uint(map_flags, BPF_F_NO_COMMON_LRU); \
-       __uint(max_entries, limit); \
-       __u32 *key; \
-       struct percpu_ratelimit *value; \
-} v4_src_rate_##n SEC(".maps");
+       __uint(type, BPF_MAP_TYPE_ARRAY); \
+       __uint(max_entries, SRC_HASH_MAX_PARALLELISM); \
+       uint32_t *key; \
+       struct persrc_rate##IPV##_bucket_##n *value; \
+} v##IPV##_src_rate_##n SEC(".maps");
 
 #include "maps.h"