#define HTON128(a) BIGEND128(a >> 3*32, a >> 2*32, a >> 1*32, a>> 0*32)
// Yes, somehow macro'ing this changes LLVM's view of htons...
#define BE16(a) (((((uint16_t)a) & 0xff00) >> 8) | ((((uint16_t)a) & 0xff) << 8))
+#define BE128BEHIGH64(val) ((uint64_t)((uint128_t)(val)))
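+// For reference: on little-endian, BE16(0x1234) evaluates to 0x3412, and the
+// big-endian-high 64 bits of a network-byte-order uint128_t sit in the
+// native low 64 bits, which is what BE128BEHIGH64 extracts above.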
+
#elif defined(__BIG_ENDIAN)
+
#define BIGEND128(a, b, c, d) ((((uint128_t)(a)) << 3*32) | (((uint128_t)(b)) << 2*32) | (((uint128_t)(c)) << 1*32) | (((uint128_t)(d)) << 0*32))
#define HTON128(a) ((uint128_t)(a))
#define BE16(a) ((uint16_t)(a))
+#define BE128BEHIGH64(val) ((uint64_t)(((uint128_t)(val)) >> 64))
+
#else
#error "Need endian info"
#endif
static const int XDP_PASS = 0;
static const int XDP_DROP = 1;
-static long drop_cnt_map[RULECNT + STATIC_RULE_CNT];
+static long drop_cnt_map[STATS_RULECNT + STATIC_RULE_CNT];
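+// In TEST builds, packets and bytes are lumped into a single per-rule
+// counter here, rather than the struct match_counter map used below.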
#define INCREMENT_MATCH(reason) { drop_cnt_map[reason] += 1; drop_cnt_map[reason] += data_end - pktdata; }
#else /* TEST */
};
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __uint(max_entries, RULECNT + STATIC_RULE_CNT);
+ __uint(max_entries, STATS_RULECNT + STATIC_RULE_CNT);
__u32 *key;
struct match_counter *value;
} drop_cnt_map SEC(".maps");
} \
}
+// Rate limits are done in a static-sized leaky bucket with a decimal counter
+// Bucket size is always exactly (1 << RATE_BUCKET_INTEGER_BITS)
+#define RATE_BUCKET_DECIMAL_BITS 8
+#define RATE_BUCKET_INTEGER_BITS 4
+
+#define RATE_BUCKET_BITS (RATE_BUCKET_DECIMAL_BITS + RATE_BUCKET_INTEGER_BITS)
+#define RATE_TIME_MASK ((1ULL << (64 - RATE_BUCKET_BITS)) - 1)
+
+// Time going backwards 10ms+ or forward 32sec+ implies we should consider it
+// an overflow, or at least stale enough that we should reset the entry.
+#define RATE_MIN_TIME_OFFSET -10000000LL
+#define RATE_MAX_TIME_OFFSET 32000000000LL
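+//
+// With the values above, a rate entry's sent_time packs two fields into one
+// uint64_t: the top RATE_BUCKET_BITS (12) bits hold the bucket fill as a
+// 4.8 fixed-point packet count (one packet = 1 << RATE_BUCKET_DECIMAL_BITS),
+// and the low 52 bits hold a nanosecond timestamp, wrapping every 2^52 ns
+// (about 52 days).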
+
#ifdef RATE_CNT
struct ratelimit {
struct bpf_spin_lock lock;
- int64_t sent_rate;
- int64_t sent_time;
+ uint64_t sent_time;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
} rate_map SEC(".maps");
#endif /* RATE_CNT */
-// For per-source rate limiting, we have to use per-CPU hash maps as Linux
-// doesn't support spinlocks inside of a LRU_HASH (see if block in
-// map_check_btf as of Linux 5.10).
-// This isn't exactly accurate, but at least its faster.
-struct percpu_ratelimit {
- int64_t sent_rate;
- int64_t sent_time;
-};
+// We implement a rather naive hashtable here instead of using a BPF map because
+// (a) the BPF map hashtables are similarly naive (no rehashing, etc),
+// (b) the BPF map LRU hashtables don't support locking.
+//
+// We first separate into a few top-level buckets with per-bucket locks, limiting
+// us to SRC_HASH_MAX_PARALLELISM (2^SRC_HASH_MAX_PARALLELISM_POW) parallel
+// accessors.
+//
+// Then, within each top-level bucket, we build an array of
+// MAX_ENTRIES/2^SRC_HASH_MAX_PARALLELISM_POW entries, split into buckets of
+// SRC_HASH_BUCKET_COUNT entries each. An entry can appear in any of the
+// SRC_HASH_BUCKET_COUNT slots of the bucket at its hash value.
+//
+// Because we use buckets of size 16 (see collision_prob.py), the number of
+// elements we can hold with only a 1% probability of overflowing a bucket is:
+//
+// 128K-entry hash table (2MiB): ~33K sources
+// 256K-entry hash table (4MiB): ~63K sources
+// 512K-entry hash table (8MiB): ~119K sources
+// 1M-entry hash table (16MiB): ~227K sources
+#define SRC_HASH_MAX_PARALLELISM_POW 8
+#define SRC_HASH_MAX_PARALLELISM (1 << SRC_HASH_MAX_PARALLELISM_POW)
+#define SRC_HASH_BUCKET_COUNT_POW 4
+#define SRC_HASH_BUCKET_COUNT (1 << SRC_HASH_BUCKET_COUNT_POW)
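+
+// For example, with the constants above a 256K-entry table is split across
+// 256 top-level locks (1024 entries each), and the per-entry hash then
+// selects one of 64 buckets of 16 entries within that array.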
+
+#define DO_RATE_LIMIT(do_lock, rate, time_masked, amt_in_pkt, limit_ns_per_pkt, matchbool) do { \
+if (rate) { \
+ do_lock; \
+ int64_t bucket_pkts = (rate->sent_time & (~RATE_TIME_MASK)) >> (64 - RATE_BUCKET_BITS); \
+	/* We mask the top RATE_BUCKET_BITS (12) bits, so the time wraps every ~52 days; the staleness check below resets such entries */ \
+ int64_t time_diff = time_masked - ((int64_t)(rate->sent_time & RATE_TIME_MASK)); \
+	if (unlikely(time_diff < RATE_MIN_TIME_OFFSET || time_diff > RATE_MAX_TIME_OFFSET)) { \
+ bucket_pkts = 0; \
+ } else { \
+ if (unlikely(time_diff < 0)) { time_diff = 0; } \
+ int64_t pkts_since_last = (time_diff << RATE_BUCKET_BITS) * ((uint64_t)amt_in_pkt) / ((uint64_t)limit_ns_per_pkt); \
+ bucket_pkts -= pkts_since_last; \
+ } \
+ if (bucket_pkts < (((1 << RATE_BUCKET_INTEGER_BITS) - 1) << RATE_BUCKET_DECIMAL_BITS)) { \
+ if (unlikely(bucket_pkts < 0)) bucket_pkts = 0; \
+ rate->sent_time = time_masked | ((bucket_pkts + (1 << RATE_BUCKET_DECIMAL_BITS)) << (64 - RATE_BUCKET_BITS)); \
+ matchbool = 0; \
+ } else { \
+ matchbool = 1; \
+ } \
+} \
+} while(0);
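+
+// Semantics of the above: the bucket fill drains in proportion to the time
+// since the last update (scaled by amt_in_pkt / limit_ns_per_pkt), and each
+// non-matching call adds one packet (1 << RATE_BUCKET_DECIMAL_BITS in 4.8
+// fixed point). When adding a packet would overflow the bucket, matchbool is
+// set to 1 instead. do_lock runs before the entry is touched; it is left
+// empty when the caller already holds a lock, as in the per-source path
+// below.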
+
+#define CREATE_PERSRC_LOOKUP(IPV, IP_TYPE) \
+struct persrc_rate##IPV##_entry { \
+ uint64_t sent_time; \
+ IP_TYPE srcip; \
+}; \
+ \
+struct persrc_rate##IPV##_bucket { \
+ struct bpf_spin_lock lock; \
+ struct persrc_rate##IPV##_entry entries[]; \
+}; \
+ \
+static int check_v##IPV##_persrc_ratelimit(IP_TYPE key, void *map, size_t map_limit, int64_t cur_time_masked, uint64_t amt, uint64_t limit_ns_per_pkt) { \
+ uint64_t hash = siphash_##IP_TYPE(key); \
+ \
+ const uint32_t map_key = hash % SRC_HASH_MAX_PARALLELISM; \
+ struct persrc_rate##IPV##_bucket *buckets = bpf_map_lookup_elem(map, &map_key); \
+ if (!buckets) return 0; \
+ \
+ hash >>= SRC_HASH_MAX_PARALLELISM_POW; \
+ map_limit >>= SRC_HASH_MAX_PARALLELISM_POW; \
+ \
+ struct persrc_rate##IPV##_entry *first_bucket = &buckets->entries[(hash % map_limit) & (~(SRC_HASH_BUCKET_COUNT - 1))]; \
+ bpf_spin_lock(&buckets->lock); \
+ \
+ int min_sent_idx = 0; \
+ uint64_t min_sent_time = UINT64_MAX; \
+ for (int i = 0; i < SRC_HASH_BUCKET_COUNT; i++) { \
+ if (first_bucket[i].srcip == key) { \
+ min_sent_idx = i; \
+ break; \
+ } \
+ int64_t time_offset = ((int64_t)cur_time_masked) - (first_bucket[i].sent_time & RATE_TIME_MASK); \
+ if (time_offset < RATE_MIN_TIME_OFFSET || time_offset > RATE_MAX_TIME_OFFSET) { \
+ min_sent_idx = i; \
+ break; \
+ } \
+ if ((first_bucket[i].sent_time & RATE_TIME_MASK) < min_sent_time) { \
+ min_sent_time = first_bucket[i].sent_time & RATE_TIME_MASK; \
+ min_sent_idx = i; \
+ } \
+ } \
+ struct persrc_rate##IPV##_entry *entry = &first_bucket[min_sent_idx]; \
+ if (entry->srcip != key) { \
+ entry->srcip = key; \
+ entry->sent_time = 0; \
+ } \
+ int matched = 0; \
+ DO_RATE_LIMIT(, entry, cur_time_masked, amt, limit_ns_per_pkt, matched); \
+ bpf_spin_unlock(&buckets->lock); \
+ return matched; \
+}
-#define V6_SRC_RATE_DEFINE(n, limit) \
-struct { \
- __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \
- __uint(map_flags, BPF_F_NO_COMMON_LRU); \
- __uint(max_entries, limit); \
- uint128_t *key; \
- struct percpu_ratelimit *value; \
-} v6_src_rate_##n SEC(".maps");
-
-#define V4_SRC_RATE_DEFINE(n, limit) \
+CREATE_PERSRC_LOOKUP(6, uint128_t)
+CREATE_PERSRC_LOOKUP(5, uint64_t) // IPv6 matching no more than a /64
+CREATE_PERSRC_LOOKUP(4, uint32_t)
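+
+// Hypothetical call-site sketch (names and limits are illustrative, not from
+// this file): for a v4 map of 262144 entries defined via SRC_RATE_DEFINE
+// below,
+//   uint64_t time_masked = bpf_ktime_get_ns() & RATE_TIME_MASK;
+//   if (check_v4_persrc_ratelimit(ip->saddr, &v4_src_rate_0, 262144,
+//                                 time_masked, 1, limit_ns_per_pkt))
+//           return XDP_DROP;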
+
+#define SRC_RATE_DEFINE(IPV, n, limit) \
+struct persrc_rate##IPV##_bucket_##n { \
+ struct bpf_spin_lock lock; \
+ struct persrc_rate##IPV##_entry entries[limit / SRC_HASH_MAX_PARALLELISM]; \
+}; \
struct { \
- __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \
- __uint(map_flags, BPF_F_NO_COMMON_LRU); \
- __uint(max_entries, limit); \
- __u32 *key; \
- struct percpu_ratelimit *value; \
-} v4_src_rate_##n SEC(".maps");
+ __uint(type, BPF_MAP_TYPE_ARRAY); \
+ __uint(max_entries, SRC_HASH_MAX_PARALLELISM); \
+ uint32_t *key; \
+ struct persrc_rate##IPV##_bucket_##n *value; \
+} v##IPV##_src_rate_##n SEC(".maps");
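+// For instance (illustrative values), SRC_RATE_DEFINE(4, 0, 262144) would
+// emit an ARRAY map v4_src_rate_0 with SRC_HASH_MAX_PARALLELISM (256)
+// slots, each a spin-locked bucket struct of 262144 / 256 = 1024 entries.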
#include "maps.h"