// Note that all operations on uint128s *stay* in Network byte order!
#if defined(__LITTLE_ENDIAN)
-#define BIGEND32(v) ((v >> 3*8) | ((v >> 8) & 0xff00) | ((v << 8) & 0xff0000) | (v << 3*8) & 0xff000000)
+#define BIGEND32(v) (((((uint32_t)(v)) >> 3*8) & 0xff) | \
+ ((((uint32_t)(v)) >> 1*8) & 0xff00) | \
+ ((((uint32_t)(v)) << 1*8) & 0xff0000) | \
+ ((((uint32_t)(v)) << 3*8) & 0xff000000))
#elif defined(__BIG_ENDIAN)
-#define BIGEND32(v) (v)
+#define BIGEND32(v) ((uint32_t)(v))
#else
#error "Need endian info"
#endif
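+// Worked example on a little-endian host: BIGEND32(0x11223344) == 0x44332211,
+// which is stored in memory as the bytes 0x11 0x22 0x33 0x44 (network order).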
#define BIGEND128(a, b, c, d) ((((uint128_t)BIGEND32(d)) << 3*32) | \
 (((uint128_t)BIGEND32(c)) << 2*32) | \
 (((uint128_t)BIGEND32(b)) << 1*32) | \
 (((uint128_t)BIGEND32(a)) << 0*32))
#define HTON128(a) BIGEND128(a >> 3*32, a >> 2*32, a >> 1*32, a >> 0*32)
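+// HTON128 splits a host-order uint128 into four 32-bit words, byte-swaps each
+// via BIGEND32, and reassembles them in reverse word order, i.e. a 16-byte swap.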
// Yes, somehow macro'ing this changes LLVM's view of htons...
-#define BE16(a) ((((uint16_t)(a & 0xff00)) >> 8) | (((uint16_t)(a & 0xff)) << 8))
+#define BE16(a) (((((uint16_t)(a)) & 0xff00) >> 8) | ((((uint16_t)(a)) & 0xff) << 8))
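+// Unlike a call through htons(), this folds to a compile-time constant for
+// constant input, e.g. BE16(ETH_P_8021Q) == BE16(0x8100) == 0x0081 on
+// little-endian hosts.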
#elif defined(__BIG_ENDIAN)
-#define BIGEND128(a, b, c, d) ((((uint128_t)a) << 3*32) | (((uint128_t)b) << 2*32) | (((uint128_t)c) << 1*32) | (((uint128_t)d) << 0*32))
-#define HTON128(a) (a)
-#define BE16(a) ((uint16_t)a)
+#define BIGEND128(a, b, c, d) ((((uint128_t)(a)) << 3*32) | (((uint128_t)(b)) << 2*32) | (((uint128_t)(c)) << 1*32) | (((uint128_t)(d)) << 0*32))
+#define HTON128(a) ((uint128_t)(a))
+#define BE16(a) ((uint16_t)(a))
#else
#error "Need endian info"
#endif
static const int XDP_DROP = 1;
static long drop_cnt_map[RULECNT + STATIC_RULE_CNT];
-#define INCREMENT_MATCH(reason) drop_cnt_map[reason] += 1;
+#define INCREMENT_MATCH(reason) { drop_cnt_map[reason] += 1; drop_cnt_map[reason] += data_end - pktdata; }
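+// In TEST builds packets and bytes share a single cell: each match adds
+// 1 + the packet length to drop_cnt_map[reason].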
-#else
+#else /* TEST */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-struct bpf_map_def SEC("maps") drop_cnt_map = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(uint32_t),
- .value_size = sizeof(long),
- .max_entries = RULECNT + STATIC_RULE_CNT,
+struct match_counter {
+ uint64_t bytes;
+ uint64_t packets;
};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, RULECNT + STATIC_RULE_CNT);
+ __u32 *key;
+ struct match_counter *value;
+} drop_cnt_map SEC(".maps");
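+// Userspace note: looking up a PERCPU_ARRAY key returns one struct
+// match_counter per possible CPU, which must be summed. A sketch, assuming
+// libbpf (map_fd and key are illustrative):
+//   struct match_counter vals[libbpf_num_possible_cpus()];
+//   bpf_map_lookup_elem(map_fd, &key, vals);
+//   for (int c = 0; c < libbpf_num_possible_cpus(); c++)
+//     totals.packets += vals[c].packets, totals.bytes += vals[c].bytes;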
+
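+// bpf_map_lookup_elem() can return NULL (e.g. for an out-of-range key), and
+// the verifier rejects the program unless the result is NULL-checked before
+// being dereferenced.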
#define INCREMENT_MATCH(reason) { \
- long *value = bpf_map_lookup_elem(&drop_cnt_map, &reason); \
- if (value) \
- *value += 1; \
+ struct match_counter *value = bpf_map_lookup_elem(&drop_cnt_map, &reason); \
+ if (value) { \
+ value->bytes += data_end - pktdata; \
+ value->packets += 1; \
+ } \
}
#ifdef RATE_CNT
struct ratelimit {
struct bpf_spin_lock lock;
- uint64_t bucket_secs;
- uint64_t bucket_count;
+ int64_t sent_rate;
+ int64_t sent_time;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, RATE_CNT);
__u32 *key;
struct ratelimit *value;
} rate_map SEC(".maps");
-#endif
+#endif /* RATE_CNT */
+
+// For per-source rate limiting, we have to use per-CPU hash maps as Linux
+// doesn't support spinlocks inside an LRU_HASH (see the if block in
+// map_check_btf as of Linux 5.10).
+// This isn't exactly accurate, but at least it's faster.
+struct percpu_ratelimit {
+ int64_t sent_rate;
+ int64_t sent_time;
+};
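+// Note that with BPF_F_NO_COMMON_LRU below, each CPU keeps its own LRU list
+// and its own counters, so a source's effective limit grows with the number
+// of CPUs its traffic is spread across.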
+#define V6_SRC_RATE_DEFINE(n, limit) \
+struct { \
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); \
+ __uint(max_entries, limit); \
+ uint128_t *key; \
+ struct percpu_ratelimit *value; \
+} v6_src_rate_##n SEC(".maps");
+
+#define V4_SRC_RATE_DEFINE(n, limit) \
+struct { \
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); \
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); \
+ __uint(max_entries, limit); \
+ __u32 *key; \
+ struct percpu_ratelimit *value; \
+} v4_src_rate_##n SEC(".maps");
+
+#include "maps.h"
+
+#ifndef HAVE_WRAPPER // Set this to call xdp_drop externally
SEC("xdp_drop")
-#endif
+#endif /* HAVE_WRAPPER */
+#endif /* not TEST */
int xdp_drop_prog(struct xdp_md *ctx)
{
const void *const data_end = (void *)(size_t)ctx->data_end;
const void *pktdata;
unsigned short eth_proto;
{
+ // DO_RETURN in CHECK_LEN relies on pktdata being set to calculate packet length.
+ // That said, we don't want to overflow, so just set packet length to 0 here.
+ pktdata = data_end;
CHECK_LEN((size_t)ctx->data, ethhdr);
const struct ethhdr *const eth = (void*)(size_t)ctx->data;
+ pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr);
#if PARSE_8021Q == PARSE
if (likely(eth->h_proto == BE16(ETH_P_8021Q))) {
CHECK_LEN((size_t)ctx->data, ethhdr_vlan);
const struct ethhdr_vlan *const eth_vlan = (void*)(size_t)ctx->data;
-
+ pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
#ifdef REQ_8021Q
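+ // The VLAN ID is the low 12 bits of the TCI, so masking the raw field with
+ // BE16(0xfff) keeps the comparison entirely in network byte order.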
if (unlikely((eth_vlan->tci & BE16(0xfff)) != BE16(REQ_8021Q)))
DO_RETURN(VLAN_DROP, XDP_DROP);
#endif
-
eth_proto = eth_vlan->h_proto;
- pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
#else
if (unlikely(eth->h_proto == BE16(ETH_P_8021Q))) {
+ pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
DO_RETURN(VLAN_DROP, PARSE_8021Q);
#endif
} else {
#ifdef REQ_8021Q
DO_RETURN(VLAN_DROP, XDP_DROP);
#else
- pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr);
eth_proto = eth->h_proto;
#endif
}
const void *l4hdr = NULL;
const struct tcphdr *tcp = NULL;
const struct icmphdr *icmp = NULL;
const struct icmp6hdr *icmpv6 = NULL;
- uint8_t ports_valid = 0;
- uint16_t sport, dport; // Host Endian! Only valid with tcp || udp
+ int32_t sport = -1, dport = -1; // Host Endian! Only valid with tcp || udp
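+ // A real port is 0-65535, so initializing to -1 means port-matching rules
+ // simply never match packets without L4 ports, replacing the old
+ // ports_valid flag.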
#ifdef NEED_V4_PARSE
if (eth_proto == BE16(ETH_P_IP)) {
tcp = (struct tcphdr*) l4hdr;
sport = BE16(tcp->source);
dport = BE16(tcp->dest);
- ports_valid = 1;
} else if (ip->protocol == IP_PROTO_UDP) {
CHECK_LEN(l4hdr, udphdr);
const struct udphdr *udp = (struct udphdr*) l4hdr;
sport = BE16(udp->source);
dport = BE16(udp->dest);
- ports_valid = 1;
} else if (ip->protocol == IP_PROTO_ICMP) {
CHECK_LEN(l4hdr, icmphdr);
icmp = (struct icmphdr*) l4hdr;
tcp = (struct tcphdr*) l4hdr;
sport = BE16(tcp->source);
dport = BE16(tcp->dest);
- ports_valid = 1;
} else if (v6nexthdr == IP_PROTO_UDP) {
CHECK_LEN(l4hdr, udphdr);
const struct udphdr *udp = (struct udphdr*) l4hdr;
sport = BE16(udp->source);
dport = BE16(udp->dest);
- ports_valid = 1;
} else if (v6nexthdr == IP6_PROTO_ICMPV6) {
CHECK_LEN(l4hdr, icmp6hdr);
icmpv6 = (struct icmp6hdr*) l4hdr;