X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=xdp.c;h=6a8c19e4f852bad1e108093332043d0033f8e55b;hb=8bcbc3efa0d2c62d461d8f6f238a181376822d00;hp=87544ba80c16578cb548db9491fb5bc655dde178;hpb=1fde2247b74f70a7585fe3f5ffe432d6522100b7;p=flowspec-xdp
diff --git a/xdp.c b/xdp.c
index 87544ba..6a8c19e 100644
--- a/xdp.c
+++ b/xdp.c
@@ -7,7 +7,7 @@
 #include 
 #include 
-#define NULL (void*)0
+#include "siphash.h"
 
 /* IP flags. */
 #define IP_CE 0x8000 /* Flag: "Congestion" */
@@ -96,10 +96,15 @@ struct tcphdr {
 #define HTON128(a) BIGEND128(a >> 3*32, a >> 2*32, a >> 1*32, a>> 0*32)
 // Yes, somehow macro'ing this changes LLVM's view of htons...
 #define BE16(a) (((((uint16_t)a) & 0xff00) >> 8) | ((((uint16_t)a) & 0xff) << 8))
+#define BE128BEHIGH64(val) ((uint64_t)((uint128_t)(val)))
+
 #elif defined(__BIG_ENDIAN)
+
 #define BIGEND128(a, b, c, d) ((((uint128_t)(a)) << 3*32) | (((uint128_t)(b)) << 2*32) | (((uint128_t)(c)) << 1*32) | (((uint128_t)(d)) << 0*32))
 #define HTON128(a) ((uint128_t)(a))
 #define BE16(a) ((uint16_t)(a))
+#define BE128BEHIGH64(val) ((uint64_t)(((uint128_t)(val)) >> 64))
+
 #else
 #error "Need endian info"
 #endif
@@ -172,14 +177,18 @@ struct {
 	} \
 }
 
+// Rate limits are done in a static-sized leaky bucket with a fixed-point
+// ("decimal") counter. The bucket size is always exactly (1 << RATE_BUCKET_INTEGER_BITS).
+#define RATE_BUCKET_DECIMAL_BITS 8
+#define RATE_BUCKET_INTEGER_BITS 4
+
+#define RATE_BUCKET_BITS (RATE_BUCKET_DECIMAL_BITS + RATE_BUCKET_INTEGER_BITS)
+#define RATE_TIME_MASK ((1ULL << (64 - RATE_BUCKET_BITS)) - 1)
+
 #ifdef RATE_CNT
 struct ratelimit {
 	struct bpf_spin_lock lock;
-	union {
-		int64_t sent_bytes;
-		int64_t sent_packets;
-	} rate;
-	int64_t sent_time;
+	uint64_t sent_time;
 };
 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -189,35 +198,88 @@ struct {
 } rate_map SEC(".maps");
 #endif /* RATE_CNT */
 
-// For per-source rate limiting, we have to use per-CPU hash maps as Linux
-// doesn't support spinlocks inside of a LRU_HASH (see if block in
-// map_check_btf as of Linux 5.10).
-// This isn't exactly accurate, but at least its faster.
-struct percpu_ratelimit {
-	union {
-		int64_t sent_bytes;
-		int64_t sent_packets;
-	} rate;
-	int64_t sent_time;
-};
+// We implement a rather naive hashtable here instead of using a BPF map because
+// (a) the BPF map hashtables are similarly naive (no rehashing, etc), and
+// (b) the BPF map LRU hashtables don't support locking.
+//
+// We first separate into SRC_HASH_MAX_PARALLELISM (i.e. 2^SRC_HASH_MAX_PARALLELISM_POW)
+// top-level buckets with per-bucket locks, limiting us to that many parallel accessors.
+//
+// Each top-level bucket then holds an array of MAX_ENTRIES / SRC_HASH_MAX_PARALLELISM
+// entries, split into buckets of size SRC_HASH_BUCKET_COUNT. An entry can appear
+// in any of the SRC_HASH_BUCKET_COUNT slots of the bucket at its hash value.
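+//
+// e.g. with the constants below and a map limit of 2^20 entries: hash % 2^9
+// picks one of 512 locked top-level buckets, leaving 2^11 entries (i.e. 2^8
+// eight-entry buckets) per top-level bucket; (hash >> 9) % 2^11, rounded down
+// to a multiple of 8, then selects the bucket which gets scanned linearly.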
+#define SRC_HASH_MAX_PARALLELISM_POW 9 +#define SRC_HASH_MAX_PARALLELISM (1 << SRC_HASH_MAX_PARALLELISM_POW) +#define SRC_HASH_BUCKET_COUNT_POW 3 +#define SRC_HASH_BUCKET_COUNT (1 << SRC_HASH_BUCKET_COUNT_POW) + +#include "rand.h" + +#define CREATE_PERSRC_LOOKUP(IPV, IP_TYPE) \ +struct persrc_rate##IPV##_entry { \ + uint64_t sent_time; \ + IP_TYPE srcip; \ +}; \ + \ +struct persrc_rate##IPV##_bucket { \ + struct bpf_spin_lock lock; \ + struct persrc_rate##IPV##_entry entries[]; \ +}; \ + \ +struct persrc_rate##IPV##_ptr { \ + struct persrc_rate##IPV##_entry *rate; \ + struct bpf_spin_lock *lock; \ +}; \ + \ +__attribute__((always_inline)) \ +static inline struct persrc_rate##IPV##_ptr get_v##IPV##_persrc_ratelimit(IP_TYPE key, void *map, size_t map_limit) { \ + struct persrc_rate##IPV##_ptr res = { .rate = NULL, .lock = NULL }; \ + uint64_t hash = siphash(&key, sizeof(key), COMPILE_TIME_RAND); \ + \ + const uint32_t map_key = hash % SRC_HASH_MAX_PARALLELISM; \ + struct persrc_rate##IPV##_bucket *buckets = bpf_map_lookup_elem(map, &map_key); \ + if (!buckets) return res; \ + \ + hash >>= SRC_HASH_MAX_PARALLELISM_POW; \ + map_limit >>= SRC_HASH_MAX_PARALLELISM_POW; \ + \ + struct persrc_rate##IPV##_entry *first_bucket = &buckets->entries[(hash % map_limit) & (~(SRC_HASH_BUCKET_COUNT - 1))]; \ + bpf_spin_lock(&buckets->lock); \ + \ + int min_sent_idx = 0; \ + uint64_t min_sent_time = UINT64_MAX; \ + for (int i = 0; i < SRC_HASH_BUCKET_COUNT; i++) { \ + if (first_bucket[i].srcip == key) { \ + res.rate = &first_bucket[i]; \ + res.lock = &buckets->lock; \ + return res; \ + } else if (min_sent_time > (first_bucket[i].sent_time & RATE_TIME_MASK)) { \ + min_sent_time = first_bucket[i].sent_time & RATE_TIME_MASK; \ + min_sent_idx = i; \ + } \ + } \ + res.rate = &first_bucket[min_sent_idx]; \ + res.rate->srcip = key; \ + res.rate->sent_time = 0; \ + res.lock = &buckets->lock; \ + return res; \ +} -#define V6_SRC_RATE_DEFINE(n, limit) \ -struct { \ - __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \ - __uint(map_flags, BPF_F_NO_COMMON_LRU); \ - __uint(max_entries, limit); \ - uint128_t *key; \ - struct percpu_ratelimit *value; \ -} v6_src_rate_##n SEC(".maps"); - -#define V4_SRC_RATE_DEFINE(n, limit) \ +CREATE_PERSRC_LOOKUP(6, uint128_t) +CREATE_PERSRC_LOOKUP(5, uint64_t) // IPv6 matching no more than a /64 +CREATE_PERSRC_LOOKUP(4, uint32_t) + +#define SRC_RATE_DEFINE(IPV, n, limit) \ +struct persrc_rate##IPV##_bucket_##n { \ + struct bpf_spin_lock lock; \ + struct persrc_rate##IPV##_entry entries[limit / SRC_HASH_MAX_PARALLELISM]; \ +}; \ struct { \ - __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \ - __uint(map_flags, BPF_F_NO_COMMON_LRU); \ - __uint(max_entries, limit); \ - __u32 *key; \ - struct percpu_ratelimit *value; \ -} v4_src_rate_##n SEC(".maps"); + __uint(type, BPF_MAP_TYPE_ARRAY); \ + __uint(max_entries, SRC_HASH_MAX_PARALLELISM); \ + uint32_t *key; \ + struct persrc_rate##IPV##_bucket_##n *value; \ +} v##IPV##_src_rate_##n SEC(".maps"); #include "maps.h"
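
With the rate union gone, the single uint64_t sent_time has to carry the whole
limiter state: the low 64 - RATE_BUCKET_BITS = 52 bits (RATE_TIME_MASK) hold the
time of the last update, and the top RATE_BUCKET_BITS = 12 bits hold the bucket
level as 4.8 fixed point. Below is a minimal userspace sketch of one leaky-bucket
update under that packing; the helper name, the exact drain rule, and the
ns_per_pkt parameter are illustrative assumptions, not code from this commit.

#include <stdbool.h>
#include <stdint.h>

#define RATE_BUCKET_DECIMAL_BITS 8
#define RATE_BUCKET_INTEGER_BITS 4
#define RATE_BUCKET_BITS (RATE_BUCKET_DECIMAL_BITS + RATE_BUCKET_INTEGER_BITS)
#define RATE_TIME_MASK ((1ULL << (64 - RATE_BUCKET_BITS)) - 1)

// Hypothetical helper: `state` packs the last-update time (low 52 bits) with a
// 12-bit fixed-point bucket level (high bits). `ns_per_pkt` is how long one
// packet's worth of budget takes to drain; it is assumed to be at least
// 1 << RATE_BUCKET_DECIMAL_BITS ns, and timestamp wraparound at 2^52 ns is
// ignored for brevity.
static bool rate_allow(uint64_t *state, uint64_t now_ns, uint64_t ns_per_pkt)
{
	uint64_t now = now_ns & RATE_TIME_MASK;
	uint64_t level = *state >> (64 - RATE_BUCKET_BITS);

	// Drain: one fixed-point step (1/2^8 of a packet) leaks out every
	// ns_per_pkt >> RATE_BUCKET_DECIMAL_BITS nanoseconds since last update.
	uint64_t drained = (now - (*state & RATE_TIME_MASK))
	                 / (ns_per_pkt >> RATE_BUCKET_DECIMAL_BITS);
	level = drained > level ? 0 : level - drained;

	// This packet adds 1.0 = 1 << RATE_BUCKET_DECIMAL_BITS. If that would
	// overflow the (1 << RATE_BUCKET_INTEGER_BITS)-packet bucket, drop the
	// packet and leave the stored state untouched.
	if (level + (1 << RATE_BUCKET_DECIMAL_BITS) >= (1ULL << RATE_BUCKET_BITS))
		return false;

	level += 1 << RATE_BUCKET_DECIMAL_BITS;
	*state = (level << (64 - RATE_BUCKET_BITS)) | now;
	return true;
}

Packing the clock and the level into one word is what lets a single spin-locked
uint64_t, or a single slot in the per-source table, hold all of the state.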
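One detail of the generated lookups worth calling out: get_vN_persrc_ratelimit()
returns with the top-level bucket's spin lock still held whenever .rate is
non-NULL (on a miss it recycles the slot whose masked sent_time is oldest), so
the caller is responsible for releasing .lock. A hypothetical call site, where
the map name and the 65536 limit stand in for whatever maps.h actually generates:

// Hypothetical: assumes maps.h emitted SRC_RATE_DEFINE(4, 0, 65536), so that
// v4_src_rate_0 exists and tracks at most 65536 IPv4 sources.
static inline void track_v4_source(uint32_t saddr_be)
{
	struct persrc_rate4_ptr rate =
		get_v4_persrc_ratelimit(saddr_be, &v4_src_rate_0, 65536);
	if (rate.rate) {
		// rate.rate->sent_time is the packed clock + bucket level; check and
		// update it here, then release the top-level bucket's lock.
		bpf_spin_unlock(rate.lock);
	}
}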