// TEST-mode stand-in for the real per-rule drop counters: one slot per rule.
// NOTE(review): each matched packet adds 1 AND the packet's byte length to the
// SAME slot, folding packet- and byte-counts together — confirm the test
// harness expects this combined tally.
static long drop_cnt_map[RULECNT + STATIC_RULE_CNT];
// Relies on `data_end` and `pktdata` being in scope at the expansion site.
// do { ... } while (0) makes the expansion a single statement, so the macro is
// safe in un-braced if/else bodies (the bare `{ ... }` form was not).
#define INCREMENT_MATCH(reason) do { drop_cnt_map[reason] += 1; drop_cnt_map[reason] += data_end - pktdata; } while (0)
-#else
+#else /* TEST */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
__u32 *key;
struct ratelimit *value;
} rate_map SEC(".maps");
-#endif
+#endif /* RATE_CNT */

// For per-source rate limiting, we have to use per-CPU hash maps as Linux
// doesn't support spinlocks inside of a LRU_HASH (see if block in
// map_check_btf as of Linux 5.10).
// Per-CPU counting isn't exactly accurate (each CPU keeps its own tally),
// but at least it's faster than a shared, locked counter.
struct percpu_ratelimit {
	// Amount sent in the current accounting window. A union because a
	// given rule limits either bytes or packets, never both at once.
	union {
		int64_t sent_bytes;
		int64_t sent_packets;
	} rate;
	// Time bookkeeping for the limiter window — presumably a timestamp;
	// TODO(review): confirm units against the update path.
	int64_t sent_time;
};

// Declares a BTF-style LRU per-CPU hash map holding per-IPv6-source
// rate-limit state for rule `n`, capped at `limit` tracked sources.
#define V6_SRC_RATE_DEFINE(n, limit) \
struct { \
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \
	__uint(max_entries, limit); \
	uint128_t *key; \
	struct percpu_ratelimit *value; \
} v6_src_rate_##n SEC(".maps");

// IPv4 counterpart of V6_SRC_RATE_DEFINE: LRU per-CPU hash map keyed by the
// 32-bit source address, holding per-source rate-limit state for rule `n`.
#define V4_SRC_RATE_DEFINE(n, limit) \
struct { \
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \
	__uint(max_entries, limit); \
	__u32 *key; \
	struct percpu_ratelimit *value; \
} v4_src_rate_##n SEC(".maps");
+
+#include "maps.h"
#ifndef HAVE_WRAPPER // Set this to call xdp_drop externally
SEC("xdp_drop")
-#endif
-#endif
+#endif /* HAVE_WRAPPER */
+#endif /* not TEST */
int xdp_drop_prog(struct xdp_md *ctx)
{
const void *const data_end = (void *)(size_t)ctx->data_end;