+#ifdef RATE_CNT
+// Per-rule rate limiter: one slot per configured rate-limit rule.
+struct ratelimit {
+ struct bpf_spin_lock lock;
+ uint64_t sent_time; /* last-send timestamp, read/written under lock */
+};
+// BTF-defined BPF array map of RATE_CNT ratelimit slots, indexed by rule id.
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, RATE_CNT);
+ __u32 *key;
+ struct ratelimit *value;
+} rate_map SEC(".maps");
+#endif /* RATE_CNT */
+
+// We implement a rather naive hashtable here instead of using a BPF map because
+// (a) the BPF map hashtables are similarly naive (no rehashing, etc),
+// (b) the BPF map LRU hashtables don't support locking.
+//
+// We first separate into a few top-level buckets with per-bucket locks, limiting
+// us to SRC_HASH_MAX_PARALLELISM (i.e. 2^SRC_HASH_MAX_PARALLELISM_POW) parallel accessors.
+//
+// Then we build an array of MAX_ENTRIES/2**SRC_HASH_MAX_PARALLELISM_POW entries,
+// which are split into buckets of size SRC_HASH_BUCKET_COUNT. An entry can appear
+// in any of the SRC_HASH_BUCKET_COUNT slots of the bucket at its hash value.
+//
+// Because we use buckets of size 16, see collision_prob.py, the number of
+// elements we can hold with only a 1% probability of overflowing a bucket is:
+//
+// 128K-entry hash table (2MiB): ~33K sources
+// 256K-entry hash table (4MiB): ~63K sources
+// 512K-entry hash table (8MiB): ~119K sources
+// 1M-entry hash table (16MiB): ~227K sources
+// 2^8 = 256 top-level lock shards (upper bound on parallel accessors).
+#define SRC_HASH_MAX_PARALLELISM_POW 8
+#define SRC_HASH_MAX_PARALLELISM (1 << SRC_HASH_MAX_PARALLELISM_POW)
+// 2^4 = 16 entries per hash bucket (see the collision-probability notes above).
+#define SRC_HASH_BUCKET_COUNT_POW 4
+#define SRC_HASH_BUCKET_COUNT (1 << SRC_HASH_BUCKET_COUNT_POW)
+
+// Generates the per-source-IP rate-limit hash-table types and lookup function
+// for one IP key width: IPV is the name suffix, IP_TYPE the key type.
+//
+// get_v##IPV##_persrc_ratelimit() returns a pointer to the matching entry for
+// `key` (or a stale/oldest entry recycled for it) together with the shard
+// lock. On any non-NULL return the bucket spin lock IS HELD; the caller must
+// bpf_spin_unlock(res.lock) when done. If the map lookup fails, both fields
+// are NULL and no lock is held.
+//
+// NOTE: comments inside the macro use /* */ form — a // comment would swallow
+// the trailing backslash continuation (line splicing precedes comment removal).
+#define CREATE_PERSRC_LOOKUP(IPV, IP_TYPE) \
+struct persrc_rate##IPV##_entry { \
+ uint64_t sent_time; /* masked send time; reset to 0 when a slot is recycled */ \
+ IP_TYPE srcip; \
+}; \
+ \
+struct persrc_rate##IPV##_bucket { \
+ struct bpf_spin_lock lock; \
+ struct persrc_rate##IPV##_entry entries[]; /* flexible array, sized per concrete map */ \
+}; \
+ \
+struct persrc_rate##IPV##_ptr { \
+ struct persrc_rate##IPV##_entry *rate; \
+ struct bpf_spin_lock *lock; /* held on return when rate != NULL */ \
+}; \
+ \
+__attribute__((always_inline)) \
+static inline struct persrc_rate##IPV##_ptr get_v##IPV##_persrc_ratelimit(IP_TYPE key, void *map, size_t map_limit, int64_t cur_time_masked) { \
+ struct persrc_rate##IPV##_ptr res = { .rate = NULL, .lock = NULL }; \
+ uint64_t hash = siphash_##IP_TYPE(key); \
+ \
+ /* Low hash bits select one of SRC_HASH_MAX_PARALLELISM lock shards. */ \
+ const uint32_t map_key = hash % SRC_HASH_MAX_PARALLELISM; \
+ struct persrc_rate##IPV##_bucket *buckets = bpf_map_lookup_elem(map, &map_key); \
+ if (!buckets) return res; \
+ \
+ /* Drop the shard bits; the rest of the hash indexes within the shard. */ \
+ hash >>= SRC_HASH_MAX_PARALLELISM_POW; \
+ map_limit >>= SRC_HASH_MAX_PARALLELISM_POW; \
+ \
+ /* Round the slot index down to a SRC_HASH_BUCKET_COUNT-aligned bucket. */ \
+ struct persrc_rate##IPV##_entry *first_bucket = &buckets->entries[(hash % map_limit) & (~(SRC_HASH_BUCKET_COUNT - 1))]; \
+ bpf_spin_lock(&buckets->lock); \
+ \
+ /* Scan the bucket for this source; track the oldest slot for eviction. */ \
+ int min_sent_idx = 0; \
+ uint64_t min_sent_time = UINT64_MAX; \
+ for (int i = 0; i < SRC_HASH_BUCKET_COUNT; i++) { \
+ if (first_bucket[i].srcip == key) { \
+ /* Existing entry for this source: return it with the lock held. */ \
+ res.rate = &first_bucket[i]; \
+ res.lock = &buckets->lock; \
+ return res; \
+ } \
+ int64_t time_offset = ((int64_t)cur_time_masked) - (first_bucket[i].sent_time & RATE_TIME_MASK); \
+ if (time_offset < RATE_MIN_TIME_OFFSET || time_offset > RATE_MAX_TIME_OFFSET) { \
+ /* Slot is outside the valid time window (stale): evict it now. */ \
+ min_sent_idx = i; \
+ break; \
+ } \
+ if ((first_bucket[i].sent_time & RATE_TIME_MASK) < min_sent_time) { \
+ min_sent_time = first_bucket[i].sent_time & RATE_TIME_MASK; \
+ min_sent_idx = i; \
+ } \
+ } \
+ /* No match: recycle the stale/oldest slot for this source (lock held). */ \
+ res.rate = &first_bucket[min_sent_idx]; \
+ res.rate->srcip = key; \
+ res.rate->sent_time = 0; \
+ res.lock = &buckets->lock; \
+ return res; \
+}
+
+// Instantiate the lookup for each supported key width:
+// v6 = full IPv6 address, v5 = IPv6 truncated to its /64, v4 = IPv4.
+CREATE_PERSRC_LOOKUP(6, uint128_t)
+CREATE_PERSRC_LOOKUP(5, uint64_t) // IPv6 matching no more than a /64
+CREATE_PERSRC_LOOKUP(4, uint32_t)
+
+// Declares one concrete per-source table of `limit` total entries, split
+// evenly across SRC_HASH_MAX_PARALLELISM lock shards, plus its BPF array map
+// (one shard-sized bucket struct per map slot).
+#define SRC_RATE_DEFINE(IPV, n, limit) \
+struct persrc_rate##IPV##_bucket_##n { \
+ struct bpf_spin_lock lock; \
+ struct persrc_rate##IPV##_entry entries[limit / SRC_HASH_MAX_PARALLELISM]; \
+}; \
+struct { \
+ __uint(type, BPF_MAP_TYPE_ARRAY); \
+ __uint(max_entries, SRC_HASH_MAX_PARALLELISM); \
+ uint32_t *key; \
+ struct persrc_rate##IPV##_bucket_##n *value; \
+} v##IPV##_src_rate_##n SEC(".maps");
+
+#include "maps.h"
+
+#ifndef HAVE_WRAPPER // Set this to call xdp_drop externally
+SEC("xdp_drop")
+#endif /* HAVE_WRAPPER */
+#endif /* not TEST */