3 #include <linux/if_ether.h>
6 #include <linux/icmp.h>
7 #include <linux/icmpv6.h>
10 #include "repeat_macro.h"
#define IP_CE 0x8000 /* Flag: "Congestion" */
#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
#define IP_MF 0x2000 /* Flag: "More Fragments" */
#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
// IANA-assigned IP protocol / next-header numbers.
#define IP_PROTO_TCP 6
#define IP_PROTO_UDP 17
#define IP_PROTO_ICMP 1
#define IP6_PROTO_ICMPV6 58
#define IP6_PROTO_FRAG 44 /* IPv6 Fragment extension header */
// 128-bit scalar used to hold whole IPv6 addresses for mask/compare ops.
typedef __uint128_t uint128_t;
// Our own ipv6hdr that uses uint128_t
// NOTE(review): the struct body/bitfield members are outside this view;
// presumably the usual version/priority bitfields live in these branches.
#if defined(__LITTLE_ENDIAN_BITFIELD)
#elif defined(__BIG_ENDIAN_BITFIELD)
#error "Please fix <asm/byteorder.h>"
} __attribute__((packed));
// Mask selecting the fragment-offset bits of frag_off (network byte order).
#define IP6_FRAGOFF 0xfff8
uint16_t frag_off; // BE low 3 bits flags, last is "more frags"
} __attribute__((packed));
// Our own ethhdr with optional vlan tags
// NOTE(review): the struct opener is outside this view; fields below are
// the 802.1Q-tagged frame layout.
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
__be16 vlan_magic; /* 0x8100 */
__be16 tci; /* PCP (3 bits), DEI (1 bit), and VLAN (12 bits) */
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
// Our own tcphdr without the flags blown up
// NOTE(review): fields outside this view; used for source/dest ports below.
} __attribute__((packed));
// Note that all operations on uint128s *stay* in Network byte order!
// BIGEND32(v): convert a host-order 32-bit value to big-endian (network)
// byte order; on big-endian hosts it is the identity.
#if defined(__LITTLE_ENDIAN)
#define BIGEND32(v) (((((uint32_t)(v)) >> 3*8) & 0xff) | \
((((uint32_t)(v)) >> 1*8) & 0xff00) | \
((((uint32_t)(v)) << 1*8) & 0xff0000) | \
((((uint32_t)(v)) << 3*8) & 0xff000000))
#elif defined(__BIG_ENDIAN)
#define BIGEND32(v) ((uint32_t)(v))
#error "Need endian info"
92 #if defined(__LITTLE_ENDIAN)
// Assemble four host-order 32-bit words (a = highest) into one big-endian
// uint128_t, byte-swapping each word via BIGEND32.
#define BIGEND128(a, b, c, d) ( \
(((uint128_t)BIGEND32(d)) << 3*32) | \
(((uint128_t)BIGEND32(c)) << 2*32) | \
(((uint128_t)BIGEND32(b)) << 1*32) | \
(((uint128_t)BIGEND32(a)) << 0*32))
// Host-to-network for uint128_t. `(a)` is parenthesized so compound
// argument expressions are shifted as a whole, not operator-spliced.
#define HTON128(a) BIGEND128((a) >> 3*32, (a) >> 2*32, (a) >> 1*32, (a) >> 0*32)
// Yes, somehow macro'ing this changes LLVM's view of htons...
// `(a)` is parenthesized: without it, e.g. BE16(x >> 16) would cast only x.
#define BE16(a) (((((uint16_t)(a)) & 0xff00) >> 8) | ((((uint16_t)(a)) & 0xff) << 8))
#elif defined(__BIG_ENDIAN)
// Big-endian host: values are already in network order, so these are no-ops.
#define BIGEND128(a, b, c, d) ((((uint128_t)(a)) << 3*32) | (((uint128_t)(b)) << 2*32) | (((uint128_t)(c)) << 1*32) | (((uint128_t)(d)) << 0*32))
#define HTON128(a) ((uint128_t)(a))
#define BE16(a) ((uint16_t)(a))
#error "Need endian info"
// Network-order IPv4 netmask with the top `pfxlen` bits set.
// `(pfxlen)` is parenthesized so expression arguments bind correctly.
// NB: pfxlen must be in [1, 32] — a shift by the full type width is UB.
#define MASK4(pfxlen) BIGEND32(~((((uint32_t)1) << (32 - (pfxlen))) - 1))
// Network-order IPv6 netmask with the top `pfxlen` bits set (pfxlen in [1, 128]).
#define MASK6(pfxlen) HTON128(~((((uint128_t)1) << (128 - (pfxlen))) - 1))
// Network-order mask covering bits [offs, pfxlen) of an IPv6 address.
#define MASK6_OFFS(offs, pfxlen) HTON128((~((((uint128_t)1) << (128 - (pfxlen))) - 1)) & ((((uint128_t)1) << (128 - (offs))) - 1))
113 // PARSE is used as a preprocessor flag to indicate parsing fields
// Branch-prediction hints; `(a)` is parenthesized so operator-precedence
// of compound arguments is preserved inside __builtin_expect.
#define unlikely(a) __builtin_expect((a), 0)
#define likely(a) __builtin_expect((a), 1)
// Drop-reason indexes for the static (non-generated) rules; generated rules
// follow after these in the counter map. These are `static const` variables
// rather than an enum because INCREMENT_MATCH takes the reason's address
// (`&reason`) for the BPF map lookup, so each must be addressable.
static const uint32_t PKT_LEN_DROP = 0;
static const uint32_t VLAN_DROP = 1;
static const uint32_t IHL_DROP = 2;
static const uint32_t V6FRAG_DROP = 3;
#define STATIC_RULE_CNT 4
// On drop, record the match in the per-reason counter before returning.
// NOTE(review): the macro continues past this view (the `return` itself is
// not visible here) — verify the full expansion before changing callers.
#define DO_RETURN(reason, ret) {\
if (ret == XDP_DROP) { INCREMENT_MATCH(reason); } \
// Per the kernel's XDP/direct-packet-access rules, data_end points
// one-past-the-last valid byte, so `start + sizeof(hdr) > data_end` is
// exactly the right bounds check (and the form the verifier recognizes).
// Drop the packet unless a full `struct struc` starting at `start` fits
// within [start, data_end). Wrapped in do/while(0) so the macro expands to
// a single statement and is safe inside unbraced if/else (no dangling-else).
#define CHECK_LEN(start, struc) do { \
if (unlikely((void*)(start) + sizeof(struct struc) > data_end)) DO_RETURN(PKT_LEN_DROP, XDP_DROP); \
} while(0)
// 64 bit version of xdp_md for testing
/* Below access go through struct xdp_rxq_info */
__u64 ingress_ifindex; /* rxq->dev->ifindex */
__u64 rx_queue_index; /* rxq->queue_index */
__u64 egress_ifindex; /* txq->dev->ifindex */
// Userspace stand-ins for the XDP action codes used by this program.
static const int XDP_PASS = 0;
static const int XDP_DROP = 1;
// Test build: counters are a plain array instead of a BPF map.
static long drop_cnt_map[RULECNT + STATIC_RULE_CNT];
// Bumps the rule's counter by one packet plus the packet's byte length.
#define INCREMENT_MATCH(reason) { drop_cnt_map[reason] += 1; drop_cnt_map[reason] += data_end - pktdata; }
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
// Per-rule match counters, kept per-CPU (PERCPU_ARRAY) to avoid contention.
struct match_counter {
// One slot per generated rule plus the static drop reasons above.
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, RULECNT + STATIC_RULE_CNT);
struct match_counter *value;
} drop_cnt_map SEC(".maps");
// Bump this reason's per-CPU counter: bytes (packet length) and packets.
// NOTE(review): the null-check on `value` is outside this view — verify it
// exists before the dereferences below (the BPF verifier requires it).
#define INCREMENT_MATCH(reason) { \
struct match_counter *value = bpf_map_lookup_elem(&drop_cnt_map, &reason); \
value->bytes += data_end - pktdata; \
value->packets += 1; \
struct bpf_spin_lock lock;
int64_t sent_packets;
// rate_map: one ratelimit entry (spin_lock protected) per rate-limit rule.
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, RATE_CNT);
struct ratelimit *value;
} rate_map SEC(".maps");
// For per-source rate limiting, we have to use per-CPU hash maps as Linux
// doesn't support spinlocks inside of a LRU_HASH (see if block in
// map_check_btf as of Linux 5.10).
// This isn't exactly accurate, but at least its faster.
#if defined(V4_SRC_RATE_CNT) || defined(V6_SRC_RATE_CNT)
// Lock-free variant of the ratelimit state for the per-CPU LRU hash maps.
struct percpu_ratelimit {
int64_t sent_packets;
// Max tracked sources per LRU map before old entries get evicted.
#define SRC_HASH_ENTRY_MAX 8192
#define CONCAT(a, b) a##b
// Token-pastes DEFINE_MAP<CNT> and applies it to NAME; this wrapper is the
// indirection layer so the CNT argument is macro-expanded before pasting.
#define DEFINE_SRC_RATE_MAPS(CNT, NAME) CONCAT(DEFINE_MAP, CNT)(NAME)
#ifdef V4_SRC_RATE_CNT
#define V4_SRC_MAPS DEFINE_SRC_RATE_MAPS(V4_SRC_RATE_CNT, v4_src_rate)
__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
__uint(max_entries, SRC_HASH_ENTRY_MAX);
struct percpu_ratelimit *value;
#ifdef V6_SRC_RATE_CNT
#define V6_SRC_MAPS DEFINE_SRC_RATE_MAPS(V6_SRC_RATE_CNT, v6_src_rate)
__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
__uint(max_entries, SRC_HASH_ENTRY_MAX);
struct percpu_ratelimit *value;
#ifndef HAVE_WRAPPER // Set this to call xdp_drop externally
// XDP entry point: parses Ethernet (plus optional 802.1Q tag), then the
// IPv4/IPv6 and L4 headers, leaving tcp/sport/dport/icmp(v6) populated for
// the generated rules that follow (outside this view). Returns an XDP
// action code (XDP_DROP / XDP_PASS).
int xdp_drop_prog(struct xdp_md *ctx)
const void *const data_end = (void *)(size_t)ctx->data_end;
const void * pktdata;
unsigned short eth_proto;
// DO_RETURN in CHECK_LEN relies on pktdata being set to calculate packet length.
// That said, we don't want to overflow, so just set packet length to 0 here.
CHECK_LEN((size_t)ctx->data, ethhdr);
const struct ethhdr *const eth = (void*)(size_t)ctx->data;
pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr);
#if PARSE_8021Q == PARSE
// Expected path: frame carries a VLAN tag which must match REQ_8021Q.
if (likely(eth->h_proto == BE16(ETH_P_8021Q))) {
CHECK_LEN((size_t)ctx->data, ethhdr_vlan);
const struct ethhdr_vlan *const eth_vlan = (void*)(size_t)ctx->data;
pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
// Compare only the low 12 VID bits of the TCI (mask is network-order).
if (unlikely((eth_vlan->tci & BE16(0xfff)) != BE16(REQ_8021Q)))
DO_RETURN(VLAN_DROP, XDP_DROP);
eth_proto = eth_vlan->h_proto;
// Not parsing 802.1Q: tagged frames get the PARSE_8021Q policy action.
if (unlikely(eth->h_proto == BE16(ETH_P_8021Q))) {
pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
DO_RETURN(VLAN_DROP, PARSE_8021Q);
DO_RETURN(VLAN_DROP, XDP_DROP);
eth_proto = eth->h_proto;
// L4 parse state shared by the v4 and v6 paths below.
const void *l4hdr = NULL;
const struct tcphdr *tcp = NULL;
uint8_t ports_valid = 0;
uint16_t sport, dport; // Host Endian! Only valid with tcp || udp
if (eth_proto == BE16(ETH_P_IP)) {
CHECK_LEN(pktdata, iphdr);
struct iphdr *ip = (struct iphdr*) pktdata;
#if PARSE_IHL == PARSE
// IHL < 5 is an invalid IPv4 header; otherwise honor variable-length options.
if (unlikely(ip->ihl < 5)) DO_RETURN(IHL_DROP, XDP_DROP);
l4hdr = pktdata + ip->ihl * 4;
if (ip->ihl != 5) DO_RETURN(IHL_DROP, PARSE_IHL);
l4hdr = pktdata + 5*4;
const struct icmphdr *icmp = NULL;
// Only the first fragment (offset 0) carries the L4 header.
if ((ip->frag_off & BE16(IP_OFFSET)) == 0) {
if (ip->protocol == IP_PROTO_TCP) {
CHECK_LEN(l4hdr, tcphdr);
tcp = (struct tcphdr*) l4hdr;
sport = BE16(tcp->source);
dport = BE16(tcp->dest);
} else if (ip->protocol == IP_PROTO_UDP) {
CHECK_LEN(l4hdr, udphdr);
const struct udphdr *udp = (struct udphdr*) l4hdr;
sport = BE16(udp->source);
dport = BE16(udp->dest);
} else if (ip->protocol == IP_PROTO_ICMP) {
CHECK_LEN(l4hdr, icmphdr);
icmp = (struct icmphdr*) l4hdr;
if (eth_proto == BE16(ETH_P_IPV6)) {
CHECK_LEN(pktdata, ip6hdr);
struct ip6hdr *ip6 = (struct ip6hdr*) pktdata;
// The fixed IPv6 header is always 40 bytes; extension headers follow.
l4hdr = pktdata + 40;
uint8_t v6nexthdr = ip6->nexthdr;
const struct ip6_fraghdr *frag6 = NULL;
#if PARSE_V6_FRAG == PARSE
// Step over a leading Fragment extension header, if present.
if (ip6->nexthdr == IP6_PROTO_FRAG) {
CHECK_LEN(l4hdr, ip6_fraghdr);
frag6 = (struct ip6_fraghdr*) l4hdr;
l4hdr = l4hdr + sizeof(struct ip6_fraghdr);
v6nexthdr = frag6->nexthdr;
if (unlikely(ip6->nexthdr == IP6_PROTO_FRAG)) {
DO_RETURN(V6FRAG_DROP, PARSE_V6_FRAG);
// TODO: Handle more options?
const struct icmp6hdr *icmpv6 = NULL;
// Parse L4 only for unfragmented packets or the first fragment.
if (frag6 == NULL || (frag6->frag_off & BE16(IP6_FRAGOFF)) == 0) {
if (v6nexthdr == IP_PROTO_TCP) {
CHECK_LEN(l4hdr, tcphdr);
tcp = (struct tcphdr*) l4hdr;
sport = BE16(tcp->source);
dport = BE16(tcp->dest);
} else if (v6nexthdr == IP_PROTO_UDP) {
CHECK_LEN(l4hdr, udphdr);
const struct udphdr *udp = (struct udphdr*) l4hdr;
sport = BE16(udp->source);
dport = BE16(udp->dest);
} else if (v6nexthdr == IP6_PROTO_ICMPV6) {
CHECK_LEN(l4hdr, icmp6hdr);
icmpv6 = (struct icmp6hdr*) l4hdr;
373 struct xdp_md test = {
375 // -1 because sizeof includes a trailing null in the "string"
376 .data_end = (uint64_t)(d + sizeof(d) - 1),
378 assert(xdp_drop_prog(&test) == TEST_EXP);