#include <linux/if_ether.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>

#define IP_CE 0x8000 /* Flag: "Congestion" */
#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
#define IP_MF 0x2000 /* Flag: "More Fragments" */
#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */

#define IP_PROTO_TCP 6
#define IP_PROTO_UDP 17
#define IP_PROTO_ICMP 1
#define IP6_PROTO_ICMPV6 58
#define IP6_PROTO_FRAG 44

typedef __uint128_t uint128_t;
// Our own ipv6hdr that uses uint128_t
struct ip6hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
#elif defined(__BIG_ENDIAN_BITFIELD)
#else
#error "Please fix <asm/byteorder.h>"
#endif
} __attribute__((packed));
#define IP6_FRAGOFF 0xfff8

struct ip6_fraghdr {
	uint8_t nexthdr;
	uint8_t reserved;
	uint16_t frag_off; // BE; low 3 bits are flags, the lowest being "more fragments" (M)
	uint32_t id;
} __attribute__((packed));
// Our own ethhdr with optional vlan tags
struct ethhdr_vlan {
	unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
	unsigned char h_source[ETH_ALEN]; /* source ether addr */
	__be16 vlan_magic; /* 0x8100 */
	__be16 tci; /* PCP (3 bits), DEI (1 bit), and VLAN ID (12 bits) */
	__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
// Our own tcphdr without the flags blown up
} __attribute__((packed));
// Note that all operations on uint128s *stay* in network byte order!

#if defined(__LITTLE_ENDIAN)
#define BIGEND32(v) (((((uint32_t)(v)) >> 3*8) & 0xff) | \
		((((uint32_t)(v)) >> 1*8) & 0xff00) | \
		((((uint32_t)(v)) << 1*8) & 0xff0000) | \
		((((uint32_t)(v)) << 3*8) & 0xff000000))
#elif defined(__BIG_ENDIAN)
#define BIGEND32(v) ((uint32_t)(v))
#else
#error "Need endian info"
#endif
#if defined(__LITTLE_ENDIAN)
#define BIGEND128(a, b, c, d) ( \
		(((uint128_t)BIGEND32(d)) << 3*32) | \
		(((uint128_t)BIGEND32(c)) << 2*32) | \
		(((uint128_t)BIGEND32(b)) << 1*32) | \
		(((uint128_t)BIGEND32(a)) << 0*32))
#define HTON128(a) BIGEND128((a) >> 3*32, (a) >> 2*32, (a) >> 1*32, (a) >> 0*32)
// Yes, somehow macro'ing this changes LLVM's view of htons...
#define BE16(a) (((((uint16_t)(a)) & 0xff00) >> 8) | ((((uint16_t)(a)) & 0xff) << 8))
#define BE128BEHIGH64(val) ((uint64_t)((uint128_t)(val)))
#elif defined(__BIG_ENDIAN)
#define BIGEND128(a, b, c, d) ((((uint128_t)(a)) << 3*32) | (((uint128_t)(b)) << 2*32) | (((uint128_t)(c)) << 1*32) | (((uint128_t)(d)) << 0*32))
#define HTON128(a) ((uint128_t)(a))
#define BE16(a) ((uint16_t)(a))
#define BE128BEHIGH64(val) ((uint64_t)(((uint128_t)(val)) >> 64))
#else
#error "Need endian info"
#endif
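
// Editor's note, with a worked example of the constant-swap trick above: on a
// little-endian host, BE16(ETH_P_8021Q) == BE16(0x8100) folds at compile time
// to 0x0081, which is exactly how the network-byte-order bytes 81 00 read as
// a host uint16_t. Comparisons like the one below (used later in this file)
// therefore need no runtime htons():
//
//	if (eth->h_proto == BE16(ETH_P_8021Q)) { /* 802.1Q-tagged frame */ }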
#define MASK4(pfxlen) BIGEND32(~((((uint32_t)1) << (32 - (pfxlen))) - 1))
#define MASK6(pfxlen) HTON128(~((((uint128_t)1) << (128 - (pfxlen))) - 1))
#define MASK6_OFFS(offs, pfxlen) HTON128((~((((uint128_t)1) << (128 - (pfxlen))) - 1)) & ((((uint128_t)1) << (128 - (offs))) - 1))
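
// A minimal usage sketch (the prefix and address constant are illustrative,
// not from any generated rule): because MASK4 yields its mask already in
// network byte order, testing an iphdr's saddr against 192.0.2.0/24 is a
// single mask-and-compare, with no byte swapping:
//
//	if ((ip->saddr & MASK4(24)) == BIGEND32(0xc0000200))
//		/* source is within 192.0.2.0/24 */;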
// PARSE is used as a preprocessor flag to indicate parsing fields
#define unlikely(a) __builtin_expect(!!(a), 0)
#define likely(a) __builtin_expect(!!(a), 1)
static const uint32_t PKT_LEN_DROP = 0;
static const uint32_t VLAN_DROP = 1;
static const uint32_t IHL_DROP = 2;
static const uint32_t V6FRAG_DROP = 3;
#define STATIC_RULE_CNT 4
#define DO_RETURN(reason, ret) { \
		if (ret == XDP_DROP) { INCREMENT_MATCH(reason); } \
		return ret; \
	}

// data_end points one-past-the-end of the packet, so a header starting at
// start is in bounds iff start + sizeof(struct struc) <= data_end; we drop
// whenever that check fails.
#define CHECK_LEN(start, struc) \
	if (unlikely((void*)(start) + sizeof(struct struc) > data_end)) DO_RETURN(PKT_LEN_DROP, XDP_DROP);
#ifdef TEST
// 64 bit version of xdp_md for testing
struct xdp_md {
	__u64 data;
	__u64 data_end;
	__u64 data_meta;
	/* The accesses below go through struct xdp_rxq_info */
	__u64 ingress_ifindex; /* rxq->dev->ifindex */
	__u64 rx_queue_index; /* rxq->queue_index */

	__u64 egress_ifindex; /* txq->dev->ifindex */
};
static const int XDP_PASS = 0;
static const int XDP_DROP = 1;

static long drop_cnt_map[STATS_RULECNT + STATIC_RULE_CNT];
// Test builds fold the packet and byte counts into a single counter per rule.
#define INCREMENT_MATCH(reason) { drop_cnt_map[reason] += 1; drop_cnt_map[reason] += data_end - pktdata; }
#else
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct match_counter {
	uint64_t bytes;
	uint64_t packets;
};
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, STATS_RULECNT + STATIC_RULE_CNT);
	__u32 *key;
	struct match_counter *value;
} drop_cnt_map SEC(".maps");

#define INCREMENT_MATCH(reason) { \
	struct match_counter *value = bpf_map_lookup_elem(&drop_cnt_map, &reason); \
	if (value) { \
		value->bytes += data_end - pktdata; \
		value->packets += 1; \
	} \
}
// Rate limits are done in a static-sized leaky bucket with a decimal counter
// Bucket size is always exactly (1 << RATE_BUCKET_INTEGER_BITS)
#define RATE_BUCKET_DECIMAL_BITS 8
#define RATE_BUCKET_INTEGER_BITS 4

#define RATE_BUCKET_BITS (RATE_BUCKET_DECIMAL_BITS + RATE_BUCKET_INTEGER_BITS)
#define RATE_TIME_MASK ((1ULL << (64 - RATE_BUCKET_BITS)) - 1)
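
// Editor's worked example of the packing above (the numbers follow directly
// from the constants): each 64-bit sent_time stores the bucket fill level in
// its top RATE_BUCKET_BITS = 12 bits as a 4.8 fixed-point packet count (up to
// 16 packets, in units of 1/256 packet) and the last-send timestamp in the
// low 52 bits, so the masked nanosecond clock wraps every 2^52 ns, roughly 52
// days. At limit_ns_per_pkt = 1000000 (1000 pkts/sec), an idle gap of 500000
// ns leaks (500000 << 8) / 1000000 = 128 fixed-point units, i.e. half a
// packet, back out of the bucket.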
// Time going backwards 10ms+ or forward 32sec+ implies we should consider it
// an overflow, or at least stale enough that we should reset the entry.
#define RATE_MIN_TIME_OFFSET -10000000LL
#define RATE_MAX_TIME_OFFSET 32000000000LL

#ifdef RATE_CNT
struct ratelimit {
	struct bpf_spin_lock lock;
	uint64_t sent_time;
};
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, RATE_CNT);
	__u32 *key;
	struct ratelimit *value;
} rate_map SEC(".maps");
#endif /* RATE_CNT */
// We implement a rather naive hashtable here instead of using a BPF map because
// (a) the BPF map hashtables are similarly naive (no rehashing, etc), and
// (b) the BPF map LRU hashtables don't support locking.
//
// We first separate into a few top-level buckets with per-bucket locks, limiting
// us to SRC_HASH_MAX_PARALLELISM parallel accessors.
//
// Then we build an array of MAX_ENTRIES/2**SRC_HASH_MAX_PARALLELISM_POW entries,
// which are split into buckets of size SRC_HASH_BUCKET_COUNT. An entry can appear
// in any of the SRC_HASH_BUCKET_COUNT slots of the bucket at its hash value.
//
// Because we use buckets of size 16 (see collision_prob.py), the number of
// elements we can hold with only a 1% probability of overflowing a bucket is:
// 128K-entry hash table (2MiB): ~33K sources
// 256K-entry hash table (4MiB): ~63K sources
// 512K-entry hash table (8MiB): ~119K sources
// 1M-entry hash table (16MiB): ~227K sources
#define SRC_HASH_MAX_PARALLELISM_POW 8
#define SRC_HASH_MAX_PARALLELISM (1 << SRC_HASH_MAX_PARALLELISM_POW)
#define SRC_HASH_BUCKET_COUNT_POW 4
#define SRC_HASH_BUCKET_COUNT (1 << SRC_HASH_BUCKET_COUNT_POW)
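
// Editor's worked example of the layout above (the table size is chosen for
// illustration): with a 256K-entry table, each of the SRC_HASH_MAX_PARALLELISM
// = 256 top-level buckets holds 262144 / 256 = 1024 entries, i.e. 64 hash
// buckets of SRC_HASH_BUCKET_COUNT = 16 slots. A source whose siphash is h
// goes to top-level bucket h % 256; (h >> 8) % 1024, rounded down to a
// multiple of 16, then selects the 16-slot bucket that is scanned under that
// top-level bucket's spinlock.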
#define DO_RATE_LIMIT(do_lock, rate, time_masked, amt_in_pkt, limit_ns_per_pkt, matchbool) do { \
		int64_t bucket_pkts = (rate->sent_time & (~RATE_TIME_MASK)) >> (64 - RATE_BUCKET_BITS); \
		/* The timestamp is masked to the low 52 bits, so it wraps every ~52 days; handled below */ \
		int64_t time_diff = time_masked - ((int64_t)(rate->sent_time & RATE_TIME_MASK)); \
		if (unlikely(time_diff < -1000000000 || time_diff > 16000000000)) { \
		if (unlikely(time_diff < 0)) { time_diff = 0; } \
		/* Leak in units of 1/2^RATE_BUCKET_DECIMAL_BITS packets, matching bucket_pkts' fixed point */ \
		int64_t pkts_since_last = (time_diff << RATE_BUCKET_DECIMAL_BITS) * ((uint64_t)amt_in_pkt) / ((uint64_t)limit_ns_per_pkt); \
		bucket_pkts -= pkts_since_last; \
		if (bucket_pkts < (((1 << RATE_BUCKET_INTEGER_BITS) - 1) << RATE_BUCKET_DECIMAL_BITS)) { \
			if (unlikely(bucket_pkts < 0)) bucket_pkts = 0; \
			rate->sent_time = time_masked | ((bucket_pkts + (1 << RATE_BUCKET_DECIMAL_BITS)) << (64 - RATE_BUCKET_BITS)); \
#define CREATE_PERSRC_LOOKUP(IPV, IP_TYPE) \
struct persrc_rate##IPV##_entry { \
	uint64_t sent_time; \
	IP_TYPE srcip; \
}; \
struct persrc_rate##IPV##_bucket { \
	struct bpf_spin_lock lock; \
	struct persrc_rate##IPV##_entry entries[]; \
}; \
static int check_v##IPV##_persrc_ratelimit(IP_TYPE key, void *map, size_t map_limit, int64_t cur_time_masked, uint64_t amt, uint64_t limit_ns_per_pkt) { \
	uint64_t hash = siphash_##IP_TYPE(key); \
	const uint32_t map_key = hash % SRC_HASH_MAX_PARALLELISM; \
	struct persrc_rate##IPV##_bucket *buckets = bpf_map_lookup_elem(map, &map_key); \
	if (!buckets) return 0; \
	hash >>= SRC_HASH_MAX_PARALLELISM_POW; \
	map_limit >>= SRC_HASH_MAX_PARALLELISM_POW; \
	struct persrc_rate##IPV##_entry *first_bucket = &buckets->entries[(hash % map_limit) & (~(SRC_HASH_BUCKET_COUNT - 1))]; \
	bpf_spin_lock(&buckets->lock); \
	uint64_t min_sent_idx = 0; /* Must be uint64_t or the BPF verifier gets lost and thinks it can be any value */ \
	uint64_t min_sent_time = UINT64_MAX; \
	for (uint64_t i = 0; i < SRC_HASH_BUCKET_COUNT; i++) { \
		if (first_bucket[i].srcip == key) { \
			min_sent_idx = i; \
			break; \
		} \
		int64_t time_offset = ((int64_t)cur_time_masked) - (first_bucket[i].sent_time & RATE_TIME_MASK); \
		if (time_offset < RATE_MIN_TIME_OFFSET || time_offset > RATE_MAX_TIME_OFFSET) { \
			min_sent_idx = i; \
			break; \
		} \
		if ((first_bucket[i].sent_time & RATE_TIME_MASK) < min_sent_time) { \
			min_sent_time = first_bucket[i].sent_time & RATE_TIME_MASK; \
			min_sent_idx = i; \
		} \
	} \
	struct persrc_rate##IPV##_entry *entry = &first_bucket[min_sent_idx]; \
	if (entry->srcip != key) { \
		entry->srcip = key; \
		entry->sent_time = 0; \
	} \
	int matched = 0; \
	DO_RATE_LIMIT(, entry, cur_time_masked, amt, limit_ns_per_pkt, matched); \
	bpf_spin_unlock(&buckets->lock); \
	return matched; \
}
CREATE_PERSRC_LOOKUP(6, uint128_t)
CREATE_PERSRC_LOOKUP(5, uint64_t) // IPv6 keyed by the top 64 bits, i.e. matching at most a /64
CREATE_PERSRC_LOOKUP(4, uint32_t)
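
// Hypothetical call-site sketch (the rule index, table size, and rate are
// illustrative; real calls are emitted by the rules generator): limit each
// IPv4 source to one packet per 10us using a 16384-entry table named
// v4_src_rate_0, as defined via SRC_RATE_DEFINE below:
//
//	int64_t time_masked = bpf_ktime_get_ns() & RATE_TIME_MASK;
//	if (check_v4_persrc_ratelimit(ip->saddr, &v4_src_rate_0, 16384,
//	                              time_masked, 1, 10000 /* ns per pkt */))
//		DO_RETURN(/* rule counter index */ 4, XDP_DROP);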
#define SRC_RATE_DEFINE(IPV, n, limit) \
struct persrc_rate##IPV##_bucket_##n { \
	struct bpf_spin_lock lock; \
	struct persrc_rate##IPV##_entry entries[limit / SRC_HASH_MAX_PARALLELISM]; \
}; \
struct { \
	__uint(type, BPF_MAP_TYPE_ARRAY); \
	__uint(max_entries, SRC_HASH_MAX_PARALLELISM); \
	__u32 *key; \
	struct persrc_rate##IPV##_bucket_##n *value; \
} v##IPV##_src_rate_##n SEC(".maps");
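
// Hypothetical expansion example (the arguments are illustrative):
// SRC_RATE_DEFINE(4, 0, 262144) declares a 256-slot ARRAY map named
// v4_src_rate_0 whose per-slot value is a spinlocked bucket holding
// 262144 / SRC_HASH_MAX_PARALLELISM = 1024 persrc_rate4_entry records,
// giving exactly the layout walked through above.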
#ifndef HAVE_WRAPPER // Set this to call xdp_drop externally
#endif /* HAVE_WRAPPER */
#endif /* not TEST */
int xdp_drop_prog(struct xdp_md *ctx)
{
	const void *const data_end = (void *)(size_t)ctx->data_end;

	const void *pktdata;
	unsigned short eth_proto;

	// DO_RETURN in CHECK_LEN relies on pktdata being set to calculate packet length.
	// That said, we don't want to overflow, so just set the packet length to 0 here.
	pktdata = data_end;

	CHECK_LEN((size_t)ctx->data, ethhdr);
	const struct ethhdr *const eth = (void*)(size_t)ctx->data;
	pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr);
#if PARSE_8021Q == PARSE
	if (likely(eth->h_proto == BE16(ETH_P_8021Q))) {
		CHECK_LEN((size_t)ctx->data, ethhdr_vlan);
		const struct ethhdr_vlan *const eth_vlan = (void*)(size_t)ctx->data;
		pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
#ifdef REQ_8021Q
		if (unlikely((eth_vlan->tci & BE16(0xfff)) != BE16(REQ_8021Q)))
			DO_RETURN(VLAN_DROP, XDP_DROP);
#endif
		eth_proto = eth_vlan->h_proto;
#else
	if (unlikely(eth->h_proto == BE16(ETH_P_8021Q))) {
		pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
		DO_RETURN(VLAN_DROP, PARSE_8021Q);
#endif
	} else {
#ifdef REQ_8021Q
		DO_RETURN(VLAN_DROP, XDP_DROP);
#endif
		eth_proto = eth->h_proto;
	}
	const void *l4hdr = NULL;
	const struct tcphdr *tcp = NULL;
	int32_t sport = -1, dport = -1; // Host endian! Only valid with tcp || udp

	if (eth_proto == BE16(ETH_P_IP)) {
		CHECK_LEN(pktdata, iphdr);
		struct iphdr *ip = (struct iphdr*) pktdata;

#if PARSE_IHL == PARSE
		if (unlikely(ip->ihl < 5)) DO_RETURN(IHL_DROP, XDP_DROP);
		l4hdr = pktdata + ip->ihl * 4;
#else
		if (ip->ihl != 5) DO_RETURN(IHL_DROP, PARSE_IHL);
		l4hdr = pktdata + 5*4;
#endif

		const struct icmphdr *icmp = NULL;
		if ((ip->frag_off & BE16(IP_OFFSET)) == 0) {
			if (ip->protocol == IP_PROTO_TCP) {
				CHECK_LEN(l4hdr, tcphdr);
				tcp = (struct tcphdr*) l4hdr;
				sport = BE16(tcp->source);
				dport = BE16(tcp->dest);
			} else if (ip->protocol == IP_PROTO_UDP) {
				CHECK_LEN(l4hdr, udphdr);
				const struct udphdr *udp = (struct udphdr*) l4hdr;
				sport = BE16(udp->source);
				dport = BE16(udp->dest);
			} else if (ip->protocol == IP_PROTO_ICMP) {
				CHECK_LEN(l4hdr, icmphdr);
				icmp = (struct icmphdr*) l4hdr;
	if (eth_proto == BE16(ETH_P_IPV6)) {
		CHECK_LEN(pktdata, ip6hdr);
		struct ip6hdr *ip6 = (struct ip6hdr*) pktdata;

		l4hdr = pktdata + 40; // The fixed IPv6 header is always 40 bytes

		uint8_t v6nexthdr = ip6->nexthdr;
		const struct ip6_fraghdr *frag6 = NULL;

#if PARSE_V6_FRAG == PARSE
		if (ip6->nexthdr == IP6_PROTO_FRAG) {
			CHECK_LEN(l4hdr, ip6_fraghdr);
			frag6 = (struct ip6_fraghdr*) l4hdr;
			l4hdr = l4hdr + sizeof(struct ip6_fraghdr);
			v6nexthdr = frag6->nexthdr;
#else
		if (unlikely(ip6->nexthdr == IP6_PROTO_FRAG)) {
			DO_RETURN(V6FRAG_DROP, PARSE_V6_FRAG);
#endif
		}

		// TODO: Handle more options?

		const struct icmp6hdr *icmpv6 = NULL;
		if (frag6 == NULL || (frag6->frag_off & BE16(IP6_FRAGOFF)) == 0) {
			if (v6nexthdr == IP_PROTO_TCP) {
				CHECK_LEN(l4hdr, tcphdr);
				tcp = (struct tcphdr*) l4hdr;
				sport = BE16(tcp->source);
				dport = BE16(tcp->dest);
			} else if (v6nexthdr == IP_PROTO_UDP) {
				CHECK_LEN(l4hdr, udphdr);
				const struct udphdr *udp = (struct udphdr*) l4hdr;
				sport = BE16(udp->source);
				dport = BE16(udp->dest);
			} else if (v6nexthdr == IP6_PROTO_ICMPV6) {
				CHECK_LEN(l4hdr, icmp6hdr);
				icmpv6 = (struct icmp6hdr*) l4hdr;
	struct xdp_md test = {
		.data = (uint64_t)d,
		// -1 because sizeof includes a trailing null in the "string"
		.data_end = (uint64_t)(d + sizeof(d) - 1),
	};
	assert(xdp_drop_prog(&test) == TEST_EXP);
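
	// Editor's note on the harness: d holds the raw packet bytes under test
	// (built from a byte-string, hence the -1 above), and TEST_EXP is the
	// verdict the rule set is expected to produce. A hypothetical test-build
	// invocation (the exact flags come from the build scripts, which define
	// TEST, TEST_EXP, and the rule macros) might look like:
	//
	//	cc -DTEST -DTEST_EXP=XDP_DROP -g -o xdp_test xdp.c && ./xdp_test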