#include <linux/icmpv6.h>
#include <arpa/inet.h>
+#include "repeat_macro.h"
+
-#define NULL (void*)0
+// NOTE(review): defined locally, presumably because no libc/stddef.h is pulled
+// into this freestanding BPF build — confirm. Parenthesized so uses such as
+// "sizeof NULL" or comparisons parse as a single expression.
+#define NULL ((void*)0)
/* IP flags. */
// Note that all operations on uint128s *stay* in Network byte order!
#if defined(__LITTLE_ENDIAN)
-#define BIGEND32(v) ((v >> 3*8) | ((v >> 8) & 0xff00) | ((v << 8) & 0xff0000) | (v << 3*8) & 0xff000000)
+// Byte-swap a 32-bit value into network byte order. The cast pins the operand
+// width to uint32_t and every byte lane is masked explicitly, so sign
+// extension or a wider-than-32-bit argument cannot leak stray bits into the
+// result (the old form masked neither the top nor the bottom lane).
+#define BIGEND32(v) (((((uint32_t)(v)) >> 3*8) & 0xff) | \
+ ((((uint32_t)(v)) >> 1*8) & 0xff00) | \
+ ((((uint32_t)(v)) << 1*8) & 0xff0000) | \
+ ((((uint32_t)(v)) << 3*8) & 0xff000000))
#elif defined(__BIG_ENDIAN)
-#define BIGEND32(v) (v)
+// Big-endian hosts are already in network byte order; just pin the type.
+#define BIGEND32(v) ((uint32_t)(v))
#else
#error "Need endian info"
#endif
(((uint128_t)BIGEND32(a)) << 0*32))
-#define HTON128(a) BIGEND128(a >> 3*32, a >> 2*32, a >> 1*32, a>> 0*32)
+// Host-to-network for uint128_t: byte-swap each 32-bit word (via BIGEND128)
+// and reverse the word order. The argument is parenthesized so an expression
+// like HTON128(x | y) shifts the whole value rather than only "y".
+#define HTON128(a) BIGEND128((a) >> 3*32, (a) >> 2*32, (a) >> 1*32, (a) >> 0*32)
// Yes, somehow macro'ing this changes LLVM's view of htons...
-#define BE16(a) ((((uint16_t)(a & 0xff00)) >> 8) | (((uint16_t)(a & 0xff)) << 8))
+// Parenthesize the argument before the cast: with a bare "(uint16_t)a", a
+// compound argument such as BE16(x + 1) would cast only "x" (a cast binds
+// tighter than "+"). The overall shape is otherwise kept as-is because of the
+// LLVM htons pattern-matching note above.
+#define BE16(a) (((((uint16_t)(a)) & 0xff00) >> 8) | ((((uint16_t)(a)) & 0xff) << 8))
#elif defined(__BIG_ENDIAN)
-#define BIGEND128(a, b, c, d) ((((uint128_t)a) << 3*32) | (((uint128_t)b) << 2*32) | (((uint128_t)c) << 1*32) | (((uint128_t)d) << 0*32))
-#define HTON128(a) (a)
-#define BE16(a) ((uint16_t)a)
+// Big-endian hosts are already in network byte order, so these reduce to
+// casts; arguments are now parenthesized so compound expressions keep the
+// same call-site semantics as on little-endian builds.
+#define BIGEND128(a, b, c, d) ((((uint128_t)(a)) << 3*32) | (((uint128_t)(b)) << 2*32) | (((uint128_t)(c)) << 1*32) | (((uint128_t)(d)) << 0*32))
+#define HTON128(a) ((uint128_t)(a))
+#define BE16(a) ((uint16_t)(a))
#else
#error "Need endian info"
#endif
#ifdef RATE_CNT
+// Global rate-limit state stored in rate_map; updates are serialized by the
+// embedded BPF spin lock.
struct ratelimit {
struct bpf_spin_lock lock;
- int64_t sent_bytes;
+ // Which member is used depends on the configured limit type; the union
+ // lets byte-based and packet-based limiters share one map value layout.
+ union {
+ int64_t sent_bytes;
+ int64_t sent_packets;
+ } rate;
int64_t sent_time;
};
struct {
} rate_map SEC(".maps");
#endif
+// For per-source rate limiting, we have to use per-CPU hash maps as Linux
+// doesn't support spinlocks inside of an LRU_HASH (see if block in
+// map_check_btf as of Linux 5.10).
+// This isn't exactly accurate, but at least it's faster.
+#if defined(V4_SRC_RATE_CNT) || defined(V6_SRC_RATE_CNT)
+// Per-CPU variant of struct ratelimit: no spin lock, since each CPU owns its
+// own copy — which is also why the aggregate limit is only approximate (see
+// note above).
+struct percpu_ratelimit {
+ // Same union layout as struct ratelimit: count either bytes or packets.
+ union {
+ int64_t sent_bytes;
+ int64_t sent_packets;
+ } rate;
+ int64_t sent_time;
+};
+#endif
+
+// Upper bound on tracked source addresses per map; the LRU_PERCPU_HASH map
+// type evicts least-recently-used entries beyond this.
+#define SRC_HASH_ENTRY_MAX 8192
+#define CONCAT(a, b) a##b
+// Two-level expansion: going through CONCAT forces CNT (e.g. V4_SRC_RATE_CNT)
+// to be macro-expanded to its value before pasting, yielding a call to
+// DEFINE_MAP<count>(NAME) — presumably supplied by repeat_macro.h; confirm.
+#define DEFINE_SRC_RATE_MAPS(CNT, NAME) CONCAT(DEFINE_MAP, CNT)(NAME)
+
+#ifdef V4_SRC_RATE_CNT
+// Expands to the declarator list for the v4 source-rate map(s); NOTE(review):
+// the SEC(".maps") annotation is presumably emitted by the DEFINE_MAPn
+// expansion — confirm against repeat_macro.h.
+#define V4_SRC_MAPS DEFINE_SRC_RATE_MAPS(V4_SRC_RATE_CNT, v4_src_rate)
+// LRU per-CPU hash keyed by a __u32 (the IPv4 source address, per the map
+// name); the value is the lock-free per-CPU rate state.
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, SRC_HASH_ENTRY_MAX);
+ __u32 *key;
+ struct percpu_ratelimit *value;
+} V4_SRC_MAPS;
+#endif
+
+#ifdef V6_SRC_RATE_CNT
+// v6 counterpart of the map above, keyed by a uint128_t (the IPv6 source
+// address, per the map name); NOTE(review): SEC(".maps") presumably comes
+// from the DEFINE_MAPn expansion — confirm against repeat_macro.h.
+#define V6_SRC_MAPS DEFINE_SRC_RATE_MAPS(V6_SRC_RATE_CNT, v6_src_rate)
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, SRC_HASH_ENTRY_MAX);
+ uint128_t *key;
+ struct percpu_ratelimit *value;
+} V6_SRC_MAPS;
+#endif
+
+#ifndef HAVE_WRAPPER // Set this to call xdp_drop externally
SEC("xdp_drop")
#endif
+#endif
int xdp_drop_prog(struct xdp_md *ctx)
{
const void *const data_end = (void *)(size_t)ctx->data_end;