#include <stdint.h>
#include <endian.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#define IP_CE     0x8000 /* Flag: "Congestion" */
#define IP_DF     0x4000 /* Flag: "Don't Fragment" */
#define IP_MF     0x2000 /* Flag: "More Fragments" */
#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */

#define IP_PROTO_TCP 6
#define IP_PROTO_UDP 17
#define IP_PROTO_ICMP 1
#define IP6_PROTO_ICMPV6 58
#define IP6_PROTO_FRAG 44
typedef __uint128_t uint128_t;

// Our own ipv6hdr that uses uint128_t
struct ip6hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 priority:4,
	     version:4;
#elif defined(__BIG_ENDIAN_BITFIELD)
	__u8 version:4,
	     priority:4;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__u8 flow_lbl[3];
	__be16 payload_len;
	__u8 nexthdr;
	__u8 hop_limit;
	uint128_t saddr;
	uint128_t daddr;
} __attribute__((packed));
// Our own ip6_fraghdr
#define IP6_FRAGOFF 0xfff8

struct ip6_fraghdr {
	uint8_t nexthdr;
	uint8_t _reserved;
	uint16_t frag_off; // BE; low 3 bits are flags, the lowest being "more fragments"
	uint32_t id;
} __attribute__((packed));
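// Worked example: in network order the 16-bit field above is laid out as
// fragment offset (13 bits) | reserved (2 bits) | M/"more fragments" (1 bit),
// so masking with BE16(IP6_FRAGOFF) == BE16(0xfff8) keeps exactly the 13
// offset bits. A result of 0 means "first fragment", i.e. the L4 header is
// present and safe to parse (this is the check performed further below).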
// Our own ethhdr with optional vlan tags
struct ethhdr_vlan {
	unsigned char h_dest[ETH_ALEN];   /* destination eth addr */
	unsigned char h_source[ETH_ALEN]; /* source ether addr */
	__be16 vlan_magic;                /* 0x8100 */
	__be16 tci;                       /* PCP (3 bits), DEI (1 bit), and VLAN ID (12 bits) */
	__be16 h_proto;                   /* packet type ID field */
} __attribute__((packed));
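// For example, with the layout above the 12-bit VLAN ID of a tagged frame can
// be recovered as BE16(eth_vlan->tci) & 0xfff (BE16 is defined below). The
// VLAN filter later in this file performs the equivalent comparison without
// leaving network byte order: (eth_vlan->tci & BE16(0xfff)) == BE16(REQ_8021Q).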
// Our own tcphdr without the flags blown up
struct tcphdr {
	__be16 source;
	__be16 dest;
	__be32 seq;
	__be32 ack_seq;
	__u16 flags;
	__be16 window;
	__sum16 check;
	__be16 urg_ptr;
} __attribute__((packed));
// Note that all operations on uint128s *stay* in network byte order!

#if defined(__LITTLE_ENDIAN)
#define BIGEND32(v) (((((uint32_t)(v)) >> 3*8) & 0xff) | \
                     ((((uint32_t)(v)) >> 1*8) & 0xff00) | \
                     ((((uint32_t)(v)) << 1*8) & 0xff0000) | \
                     ((((uint32_t)(v)) << 3*8) & 0xff000000))
#elif defined(__BIG_ENDIAN)
#define BIGEND32(v) ((uint32_t)(v))
#else
#error "Need endian info"
#endif
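// Worked example (little-endian host): BIGEND32(0x01020304) rearranges the
// bytes to 0x04030201, which is stored in memory as 01 02 03 04, i.e. network
// byte order. On a big-endian host the value is already stored that way, so
// the macro is the identity.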
#if defined(__LITTLE_ENDIAN)
#define BIGEND128(a, b, c, d) ( \
		(((uint128_t)BIGEND32(d)) << 3*32) | \
		(((uint128_t)BIGEND32(c)) << 2*32) | \
		(((uint128_t)BIGEND32(b)) << 1*32) | \
		(((uint128_t)BIGEND32(a)) << 0*32))
#define HTON128(a) BIGEND128((a) >> 3*32, (a) >> 2*32, (a) >> 1*32, (a) >> 0*32)
// Yes, somehow macro'ing this changes LLVM's view of htons...
#define BE16(a) (((((uint16_t)(a)) & 0xff00) >> 8) | ((((uint16_t)(a)) & 0xff) << 8))
#define BE128BEHIGH64(val) ((uint64_t)((uint128_t)(val)))
#elif defined(__BIG_ENDIAN)

#define BIGEND128(a, b, c, d) ((((uint128_t)(a)) << 3*32) | (((uint128_t)(b)) << 2*32) | (((uint128_t)(c)) << 1*32) | (((uint128_t)(d)) << 0*32))
#define HTON128(a) ((uint128_t)(a))
#define BE16(a) ((uint16_t)(a))
#define BE128BEHIGH64(val) ((uint64_t)(((uint128_t)(val)) >> 64))

#else
#error "Need endian info"
#endif
#define MASK4(pfxlen) BIGEND32(~((((uint32_t)1) << (32 - (pfxlen))) - 1))
#define MASK6(pfxlen) HTON128(~((((uint128_t)1) << (128 - (pfxlen))) - 1))
#define MASK6_OFFS(offs, pfxlen) HTON128((~((((uint128_t)1) << (128 - (pfxlen))) - 1)) & ((((uint128_t)1) << (128 - (offs))) - 1))
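// Worked example: MASK4(24) computes ~((1 << 8) - 1) = 0xffffff00 and then
// byte-swaps it on little-endian hosts, producing a /24 netmask whose bytes
// read ff ff ff 00 in memory (network order). MASK6_OFFS(32, 64) similarly
// yields a mask covering only bits 32..63 of an IPv6 address, for matching a
// middle chunk of the address.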
// PARSE is used as a preprocessor flag to indicate parsing fields

#define unlikely(a) __builtin_expect(a, 0)
#define likely(a) __builtin_expect(a, 1)

static const uint32_t PKT_LEN_DROP = 0;
static const uint32_t VLAN_DROP = 1;
static const uint32_t IHL_DROP = 2;
static const uint32_t V6FRAG_DROP = 3;
#define STATIC_RULE_CNT 4
#define DO_RETURN(reason, ret) { \
		if (ret == XDP_DROP) { INCREMENT_MATCH(reason); } \
		return ret; \
	}

// It seems (based on drop counts) that data_end points to the last byte, not one-past-the-end.
// This feels strange, but some documentation suggests > here as well, so we stick with that.
#define CHECK_LEN(start, struc) \
	if (unlikely((void*)(start) + sizeof(struct struc) > data_end)) DO_RETURN(PKT_LEN_DROP, XDP_DROP);
#ifdef TEST
// 64-bit version of struct xdp_md for testing
struct xdp_md {
	__u64 data;
	__u64 data_end;
	__u64 data_meta;
	/* Below access go through struct xdp_rxq_info */
	__u64 ingress_ifindex; /* rxq->dev->ifindex */
	__u64 rx_queue_index;  /* rxq->queue_index */

	__u64 egress_ifindex;  /* txq->dev->ifindex */
};

static const int XDP_PASS = 0;
static const int XDP_DROP = 1;

static long drop_cnt_map[RULECNT + STATIC_RULE_CNT];
#define INCREMENT_MATCH(reason) { drop_cnt_map[reason] += 1; drop_cnt_map[reason] += data_end - pktdata; }
#else
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct match_counter {
	uint64_t bytes;
	uint64_t packets;
};
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, RULECNT + STATIC_RULE_CNT);
	uint32_t *key;
	struct match_counter *value;
} drop_cnt_map SEC(".maps");

#define INCREMENT_MATCH(reason) { \
	struct match_counter *value = bpf_map_lookup_elem(&drop_cnt_map, &reason); \
	if (value) { \
		value->bytes += data_end - pktdata; \
		value->packets += 1; \
	} \
}
// Rate limits are done in a static-sized leaky bucket with a decimal counter
// Bucket size is always exactly (1 << RATE_BUCKET_INTEGER_BITS)
#define RATE_BUCKET_DECIMAL_BITS 8
#define RATE_BUCKET_INTEGER_BITS 4

#define RATE_BUCKET_BITS (RATE_BUCKET_DECIMAL_BITS + RATE_BUCKET_INTEGER_BITS)
#define RATE_TIME_MASK ((1ULL << (64 - RATE_BUCKET_BITS)) - 1)
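// Worked example with the values above: the bucket counter occupies the top
// 12 bits of a 64-bit word (4 integer bits, so the bucket holds at most 16
// tokens, with 8 fractional bits giving 1/256-token granularity), and the
// remaining 52 bits (RATE_TIME_MASK) store the timestamp of the last update,
// which is why sent_time values below are masked with RATE_TIME_MASK before
// being compared.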
#ifdef RATE_CNT
struct ratelimit {
	struct bpf_spin_lock lock;
	uint64_t sent_time;
};
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, RATE_CNT);
	uint32_t *key;
	struct ratelimit *value;
} rate_map SEC(".maps");
#endif /* RATE_CNT */
// We implement a rather naive hashtable here instead of using a BPF map because
// (a) the BPF map hashtables are similarly naive (no rehashing, etc),
// (b) the BPF map LRU hashtables don't support locking.
//
// We first separate into a few top-level buckets with per-bucket locks, limiting
// us to SRC_HASH_MAX_PARALLELISM parallel accessors.
//
// Then we build an array of MAX_ENTRIES / SRC_HASH_MAX_PARALLELISM entries,
// which are split into buckets of size SRC_HASH_BUCKET_COUNT. An entry can
// appear in any of the SRC_HASH_BUCKET_COUNT slots within the bucket at its
// hash value.
#define SRC_HASH_MAX_PARALLELISM_POW 9
#define SRC_HASH_MAX_PARALLELISM (1 << SRC_HASH_MAX_PARALLELISM_POW)
#define SRC_HASH_BUCKET_COUNT_POW 3
#define SRC_HASH_BUCKET_COUNT (1 << SRC_HASH_BUCKET_COUNT_POW)
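// Concretely, with the defaults above: a hash of the source IP first selects
// one of 512 (SRC_HASH_MAX_PARALLELISM) top-level map entries, each with its
// own spin lock. The remaining hash bits then select an 8-entry
// (SRC_HASH_BUCKET_COUNT) bucket within that entry's array, and the source's
// record may live in any of those 8 slots.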
#define CREATE_PERSRC_LOOKUP(IPV, IP_TYPE) \
struct persrc_rate##IPV##_entry { \
	uint64_t sent_time; \
	IP_TYPE srcip; \
}; \
 \
struct persrc_rate##IPV##_bucket { \
	struct bpf_spin_lock lock; \
	struct persrc_rate##IPV##_entry entries[]; \
}; \
 \
struct persrc_rate##IPV##_ptr { \
	struct persrc_rate##IPV##_entry *rate; \
	struct bpf_spin_lock *lock; \
}; \
 \
__attribute__((always_inline)) \
static inline struct persrc_rate##IPV##_ptr get_v##IPV##_persrc_ratelimit(IP_TYPE key, void *map, size_t map_limit) { \
	struct persrc_rate##IPV##_ptr res = { .rate = NULL, .lock = NULL }; \
	uint64_t hash = siphash(&key, sizeof(key), COMPILE_TIME_RAND); \
 \
	const uint32_t map_key = hash % SRC_HASH_MAX_PARALLELISM; \
	struct persrc_rate##IPV##_bucket *buckets = bpf_map_lookup_elem(map, &map_key); \
	if (!buckets) return res; \
 \
	hash >>= SRC_HASH_MAX_PARALLELISM_POW; \
	map_limit >>= SRC_HASH_MAX_PARALLELISM_POW; \
 \
	struct persrc_rate##IPV##_entry *first_bucket = &buckets->entries[(hash % map_limit) & (~(SRC_HASH_BUCKET_COUNT - 1))]; \
	bpf_spin_lock(&buckets->lock); \
 \
	int min_sent_idx = 0; \
	uint64_t min_sent_time = UINT64_MAX; \
	for (int i = 0; i < SRC_HASH_BUCKET_COUNT; i++) { \
		if (first_bucket[i].srcip == key) { \
			res.rate = &first_bucket[i]; \
			res.lock = &buckets->lock; \
			return res; \
		} else if (min_sent_time > (first_bucket[i].sent_time & RATE_TIME_MASK)) { \
			min_sent_time = first_bucket[i].sent_time & RATE_TIME_MASK; \
			min_sent_idx = i; \
		} \
	} \
	/* No match: evict the least-recently-updated slot in this bucket */ \
	res.rate = &first_bucket[min_sent_idx]; \
	res.rate->srcip = key; \
	res.rate->sent_time = 0; \
	res.lock = &buckets->lock; \
	return res; \
}

CREATE_PERSRC_LOOKUP(6, uint128_t)
CREATE_PERSRC_LOOKUP(5, uint64_t) // IPv6, but matching no more than a /64, so the key fits in 64 bits
CREATE_PERSRC_LOOKUP(4, uint32_t)
#define SRC_RATE_DEFINE(IPV, n, limit) \
struct persrc_rate##IPV##_bucket_##n { \
	struct bpf_spin_lock lock; \
	struct persrc_rate##IPV##_entry entries[limit / SRC_HASH_MAX_PARALLELISM]; \
}; \
struct { \
	__uint(type, BPF_MAP_TYPE_ARRAY); \
	__uint(max_entries, SRC_HASH_MAX_PARALLELISM); \
	uint32_t *key; \
	struct persrc_rate##IPV##_bucket_##n *value; \
} v##IPV##_src_rate_##n SEC(".maps");
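// A minimal usage sketch (illustrative only; the real instantiations and
// lookups are emitted by the rule generator, and the rule index 0 and limit
// 65536 below are assumptions): SRC_RATE_DEFINE(4, 0, 65536) would declare
// the per-source map for rule 0, and a lookup would then look like:
//
//	struct persrc_rate4_ptr rate = get_v4_persrc_ratelimit(ip->saddr, &v4_src_rate_0, 65536);
//	if (rate.rate) {
//		/* ...update rate.rate->sent_time and check the bucket... */
//		bpf_spin_unlock(rate.lock);
//	}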
#ifndef HAVE_WRAPPER // Set this to call xdp_drop externally
SEC("xdp_drop")
#endif /* HAVE_WRAPPER */
#endif /* not TEST */
int xdp_drop_prog(struct xdp_md *ctx)
{
	const void *const data_end = (void *)(size_t)ctx->data_end;

	const void * pktdata;
	unsigned short eth_proto;

	{
		// DO_RETURN in CHECK_LEN relies on pktdata being set to calculate packet length.
		// That said, we don't want to overflow, so just set packet length to 0 here.
		pktdata = data_end;
		CHECK_LEN((size_t)ctx->data, ethhdr);
		const struct ethhdr *const eth = (void*)(size_t)ctx->data;
		pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr);
#if PARSE_8021Q == PARSE
		if (likely(eth->h_proto == BE16(ETH_P_8021Q))) {
			CHECK_LEN((size_t)ctx->data, ethhdr_vlan);
			const struct ethhdr_vlan *const eth_vlan = (void*)(size_t)ctx->data;
			pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
#ifdef REQ_8021Q
			if (unlikely((eth_vlan->tci & BE16(0xfff)) != BE16(REQ_8021Q)))
				DO_RETURN(VLAN_DROP, XDP_DROP);
#endif
			eth_proto = eth_vlan->h_proto;
#else
		if (unlikely(eth->h_proto == BE16(ETH_P_8021Q))) {
			pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
			DO_RETURN(VLAN_DROP, PARSE_8021Q);
#endif
		} else {
#ifdef REQ_8021Q
			DO_RETURN(VLAN_DROP, XDP_DROP);
#else
			eth_proto = eth->h_proto;
#endif
		}
	}
	const void *l4hdr = NULL;
	const struct tcphdr *tcp = NULL;
	int32_t sport = -1, dport = -1; // Host endian! Only valid with tcp || udp
	if (eth_proto == BE16(ETH_P_IP)) {
		CHECK_LEN(pktdata, iphdr);
		struct iphdr *ip = (struct iphdr*) pktdata;

#if PARSE_IHL == PARSE
		if (unlikely(ip->ihl < 5)) DO_RETURN(IHL_DROP, XDP_DROP);
		l4hdr = pktdata + ip->ihl * 4;
#else
		if (ip->ihl != 5) DO_RETURN(IHL_DROP, PARSE_IHL);
		l4hdr = pktdata + 5*4;
#endif

		const struct icmphdr *icmp = NULL;
		if ((ip->frag_off & BE16(IP_OFFSET)) == 0) {
			if (ip->protocol == IP_PROTO_TCP) {
				CHECK_LEN(l4hdr, tcphdr);
				tcp = (struct tcphdr*) l4hdr;
				sport = BE16(tcp->source);
				dport = BE16(tcp->dest);
			} else if (ip->protocol == IP_PROTO_UDP) {
				CHECK_LEN(l4hdr, udphdr);
				const struct udphdr *udp = (struct udphdr*) l4hdr;
				sport = BE16(udp->source);
				dport = BE16(udp->dest);
			} else if (ip->protocol == IP_PROTO_ICMP) {
				CHECK_LEN(l4hdr, icmphdr);
				icmp = (struct icmphdr*) l4hdr;
			}
		}

		// (the generated per-rule IPv4 matching, which uses ip/tcp/icmp and sport/dport above, presumably lands here)
	}
	if (eth_proto == BE16(ETH_P_IPV6)) {
		CHECK_LEN(pktdata, ip6hdr);
		struct ip6hdr *ip6 = (struct ip6hdr*) pktdata;

		l4hdr = pktdata + 40; // sizeof(struct ip6hdr) == 40

		uint8_t v6nexthdr = ip6->nexthdr;
		const struct ip6_fraghdr *frag6 = NULL;

#if PARSE_V6_FRAG == PARSE
		if (ip6->nexthdr == IP6_PROTO_FRAG) {
			CHECK_LEN(l4hdr, ip6_fraghdr);
			frag6 = (struct ip6_fraghdr*) l4hdr;
			l4hdr = l4hdr + sizeof(struct ip6_fraghdr);
			v6nexthdr = frag6->nexthdr;
#else
		if (unlikely(ip6->nexthdr == IP6_PROTO_FRAG)) {
			DO_RETURN(V6FRAG_DROP, PARSE_V6_FRAG);
#endif
		}
		// TODO: Handle more options?

		const struct icmp6hdr *icmpv6 = NULL;
		if (frag6 == NULL || (frag6->frag_off & BE16(IP6_FRAGOFF)) == 0) {
			if (v6nexthdr == IP_PROTO_TCP) {
				CHECK_LEN(l4hdr, tcphdr);
				tcp = (struct tcphdr*) l4hdr;
				sport = BE16(tcp->source);
				dport = BE16(tcp->dest);
			} else if (v6nexthdr == IP_PROTO_UDP) {
				CHECK_LEN(l4hdr, udphdr);
				const struct udphdr *udp = (struct udphdr*) l4hdr;
				sport = BE16(udp->source);
				dport = BE16(udp->dest);
			} else if (v6nexthdr == IP6_PROTO_ICMPV6) {
				CHECK_LEN(l4hdr, icmp6hdr);
				icmpv6 = (struct icmp6hdr*) l4hdr;
			}
		}

		// (the generated per-rule IPv6 matching, which uses ip6/tcp/icmpv6 and sport/dport above, presumably lands here)
	}

	return XDP_PASS;
}
#ifdef TEST
#include <assert.h>

int main() {
	// (d holds the raw packet bytes under test; its declaration is elided here)
	struct xdp_md test = {
		.data = (uint64_t)d,
		// -1 because sizeof includes a trailing null in the "string"
		.data_end = (uint64_t)(d + sizeof(d) - 1),
	};
	assert(xdp_drop_prog(&test) == TEST_EXP);
}
#endif