+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, RULECNT + STATIC_RULE_CNT);
+ __u32 *key;
+ struct match_counter *value;
+} drop_cnt_map SEC(".maps");
+
+#define INCREMENT_MATCH(reason) do { \
+	struct match_counter *value = bpf_map_lookup_elem(&drop_cnt_map, &reason); \
+	if (value) { \
+		value->bytes += data_end - pktdata; \
+		value->packets += 1; \
+	} \
+} while (0)
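+// Example invocation (hypothetical rule body, not part of this patch): the
+// argument must be an addressable __u32 because the macro takes &reason, and
+// pktdata/data_end are the packet bounds already in scope in rule code:
+//   __u32 rule_idx = 7;
+//   INCREMENT_MATCH(rule_idx);
+// Since this is a PERCPU_ARRAY, userspace sums each index's per-CPU copies
+// to recover the totals.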
+
+#ifdef RATE_CNT
+struct ratelimit {
+ struct bpf_spin_lock lock;
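+	// A rule limits either bytes or packets, never both, so the two
+	// counters can share storage.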
+ union {
+ int64_t sent_bytes;
+ int64_t sent_packets;
+ } rate;
+ int64_t sent_time;
+};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, RATE_CNT);
+ __u32 *key;
+ struct ratelimit *value;
+} rate_map SEC(".maps");
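+// Illustrative sketch (the index choice and byte-vs-packet accounting are
+// hypothetical) of how a rule body could charge a packet against an entry;
+// bpf_spin_lock serializes CPUs touching the same entry:
+//   __u32 ratelimitidx = 0;
+//   struct ratelimit *rate = bpf_map_lookup_elem(&rate_map, &ratelimitidx);
+//   if (rate) {
+//       bpf_spin_lock(&rate->lock);
+//       rate->rate.sent_bytes += data_end - pktdata;
+//       bpf_spin_unlock(&rate->lock);
+//   }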
+#endif
+
+// For per-source rate limiting we have to use per-CPU hash maps, as Linux
+// doesn't support spin locks inside an LRU_HASH (see the if block in
+// map_check_btf as of Linux 5.10). Per-CPU tracking isn't exactly accurate,
+// since each CPU only sees its own share of a source's traffic, but at
+// least it's faster.
+#if defined(V4_SRC_RATE_CNT) || defined(V6_SRC_RATE_CNT)
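+// Same fields as struct ratelimit but without the bpf_spin_lock: each CPU
+// gets its own copy of a per-CPU map value, so updates need no locking.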
+struct percpu_ratelimit {
+ union {
+ int64_t sent_bytes;
+ int64_t sent_packets;
+ } rate;
+ int64_t sent_time;
+};
+#endif
+
+#define SRC_HASH_ENTRY_MAX 8192
+#define CONCAT(a, b) a##b
+#define DEFINE_SRC_RATE_MAPS(CNT, NAME) CONCAT(DEFINE_MAP, CNT)(NAME)
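+// The extra macro level matters: CNT (e.g. -DV4_SRC_RATE_CNT=2) is expanded
+// to its numeric value while DEFINE_SRC_RATE_MAPS's arguments are
+// substituted, before CONCAT's ## paste, so the result is e.g.
+// DEFINE_MAP2(v4_src_rate) rather than DEFINE_MAPV4_SRC_RATE_CNT(...).
+// (The DEFINE_MAPn helpers are assumed to be defined elsewhere.)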
+
+#ifdef V4_SRC_RATE_CNT
+#define V4_SRC_MAPS DEFINE_SRC_RATE_MAPS(V4_SRC_RATE_CNT, v4_src_rate)
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, SRC_HASH_ENTRY_MAX);
+ __u32 *key;
+ struct percpu_ratelimit *value;
+} V4_SRC_MAPS;
+#endif
+
+#ifdef V6_SRC_RATE_CNT
+#define V6_SRC_MAPS DEFINE_SRC_RATE_MAPS(V6_SRC_RATE_CNT, v6_src_rate)
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, SRC_HASH_ENTRY_MAX);
+ uint128_t *key;
+ struct percpu_ratelimit *value;
+} V6_SRC_MAPS;
+#endif
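+// Hypothetical per-source update sketch (the concrete map name depends on
+// the DEFINE_MAPn expansion; v4_src_rate_0 is illustrative). A lookup in an
+// LRU_PERCPU_HASH returns this CPU's copy, so no lock is taken:
+//   __u32 srcip = ip->saddr;
+//   struct percpu_ratelimit *v = bpf_map_lookup_elem(&v4_src_rate_0, &srcip);
+//   if (v) v->rate.sent_bytes += data_end - pktdata;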