-#define DO_RETURN(reason, ret) {\
- if (ret == XDP_DROP) { \
- long *value = bpf_map_lookup_elem(&drop_cnt_map, &reason); \
- if (value) \
- *value += 1; \
- } \
- return XDP_DROP; \
- }
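+// Per-rule drop counters are kept per-CPU so the fast path needs no
+// atomics; userspace sums the per-CPU values when reading.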
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, RULECNT + STATIC_RULE_CNT);
+ __u32 *key;
+ struct match_counter *value;
+} drop_cnt_map SEC(".maps");
+
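+// Bump the counter for `reason` by one packet plus the packet's length;
+// expects pktdata/data_end to be in scope at the expansion site.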
+#define INCREMENT_MATCH(reason) { \
+ struct match_counter *value = bpf_map_lookup_elem(&drop_cnt_map, &reason); \
+ if (value) { \
+ value->bytes += data_end - pktdata; \
+ value->packets += 1; \
+ } \
+}
+
+#ifdef RATE_CNT
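+// Presumably one entry per rate-limited rule; a given rule limits either
+// bytes or packets, never both, hence the union.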
+struct ratelimit {
+ struct bpf_spin_lock lock;
+ union {
+ int64_t sent_bytes;
+ int64_t sent_packets;
+ } rate;
+ int64_t sent_time;
+};
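+// Rule rate-limiter state shared across CPUs (a plain ARRAY map), so the
+// spin lock serializes updates.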
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, RATE_CNT);
+ __u32 *key;
+ struct ratelimit *value;
+} rate_map SEC(".maps");
+#endif
+
+// For per-source rate limiting, we have to use per-CPU hash maps as Linux
+// doesn't support spinlocks inside an LRU_HASH (see the if block in
+// map_check_btf as of Linux 5.10).
+// Per-CPU tracking isn't exactly accurate (each CPU counts its own rate
+// independently), but at least it's faster.
+#if defined(V4_SRC_RATE_CNT) || defined(V6_SRC_RATE_CNT)
+struct percpu_ratelimit {
+ union {
+ int64_t sent_bytes;
+ int64_t sent_packets;
+ } rate;
+ int64_t sent_time;
+};
+#endif
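+
+// A minimal sketch of how percpu_ratelimit would be stored (the map name
+// and key type below are assumptions, not part of this patch): an LRU
+// per-CPU hash keyed by source address, so each CPU updates its own entry
+// without needing a lock.
+#ifdef V4_SRC_RATE_CNT
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, V4_SRC_RATE_CNT);
+ __u32 *key; // assumed: the (masked) IPv4 source address
+ struct percpu_ratelimit *value;
+} v4_src_rate_map SEC(".maps");
+#endif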