3 #include <linux/if_ether.h>
6 #include <linux/icmp.h>
7 #include <linux/icmpv6.h>
// IPv4 fragment-field flag masks (host-order values, mirroring <linux/ip.h>);
// they are compared against iphdr->frag_off after BE16() conversion below.
#define IP_CE 0x8000 /* Flag: "Congestion" */
#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
#define IP_MF 0x2000 /* Flag: "More Fragments" */
#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
// IANA-assigned IP protocol / IPv6 next-header numbers, matched against
// iphdr->protocol and ip6hdr->nexthdr in the parser below.
#define IP_PROTO_TCP 6
#define IP_PROTO_UDP 17
#define IP_PROTO_ICMP 1
#define IP6_PROTO_ICMPV6 58
#define IP6_PROTO_FRAG 44 /* IPv6 Fragment extension header */
// 128-bit unsigned integer (GCC/Clang extension) so IPv6-sized values can be
// masked and compared in a single operation (see MASK6/HTON128 below).
typedef __uint128_t uint128_t;
26 // Our own ipv6hdr that uses uint128_t
28 #if defined(__LITTLE_ENDIAN_BITFIELD)
31 #elif defined(__BIG_ENDIAN_BITFIELD)
35 #error "Please fix <asm/byteorder.h>"
45 } __attribute__((packed));
48 #define IP6_FRAGOFF 0xfff8
52 uint16_t frag_off; // BE low 3 bits flags, last is "more frags"
54 } __attribute__((packed));
// Our own ethhdr with an 802.1Q VLAN tag inserted between the source MAC and
// the EtherType (used when the frame is tagged; untagged frames use the
// kernel's plain struct ethhdr). The struct declaration line was missing
// here; reconstructed from the `struct ethhdr_vlan` usage in the parser.
struct ethhdr_vlan {
	unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
	unsigned char h_source[ETH_ALEN]; /* source ether addr */
	__be16 vlan_magic; /* 0x8100 (the 802.1Q TPID, ETH_P_8021Q) */
	__be16 tci; /* PCP (3 bits), DEI (1 bit), and VLAN ID (12 bits) */
	__be16 h_proto; /* encapsulated packet type ID field */
} __attribute__((packed)); /* packed: this overlays raw wire bytes */
65 // Our own tcphdr without the flags blown up
75 } __attribute__((packed));
77 // Note that all operations on uint128s *stay* in Network byte order!
79 #if defined(__LITTLE_ENDIAN)
// Convert a host-order (little-endian) 32-bit value to network byte order.
// Spelled as a plain shift/mask expression (not __builtin_bswap32) so it
// remains usable in constant expressions the same way everywhere.
#define BIGEND32(v) ((((uint32_t)(v)) >> 24) | \
		((((uint32_t)(v)) >> 8) & 0x0000ff00) | \
		((((uint32_t)(v)) << 8) & 0x00ff0000) | \
		(((uint32_t)(v)) << 24))
84 #elif defined(__BIG_ENDIAN)
85 #define BIGEND32(v) ((uint32_t)(v))
87 #error "Need endian info"
90 #if defined(__LITTLE_ENDIAN)
// Build a network-byte-order 128-bit value from four 32-bit words (a = most
// significant on the wire). On little-endian hosts each word is byte-swapped
// via BIGEND32 and the word order is reversed, so `a`'s bytes land first in
// memory.
#define BIGEND128(a, b, c, d) ( \
	(((uint128_t)BIGEND32(d)) << 3*32) | \
	(((uint128_t)BIGEND32(c)) << 2*32) | \
	(((uint128_t)BIGEND32(b)) << 1*32) | \
	(((uint128_t)BIGEND32(a)) << 0*32))
// Host-to-network conversion helpers (little-endian branch).
// Fix: macro parameters are now fully parenthesized — the originals expanded
// bare `a`, so an argument containing a lower-precedence operator (e.g.
// HTON128(x < y)) would have mis-bound after expansion. All existing call
// sites pass simple expressions, so generated code is unchanged.
// Note: BE16 still evaluates its argument twice; keep arguments side-effect
// free.
#define HTON128(a) BIGEND128((a) >> 3*32, (a) >> 2*32, (a) >> 1*32, (a) >> 0*32)
// Yes, somehow macro'ing this changes LLVM's view of htons...
#define BE16(a) (((((uint16_t)(a)) & 0xff00) >> 8) | ((((uint16_t)(a)) & 0xff) << 8))
// The big-endian-high 64 bits of a network-order uint128_t live in the
// native-low half on little-endian hosts, so a plain truncation extracts them.
#define BE128BEHIGH64(val) ((uint64_t)((uint128_t)(val)))
101 #elif defined(__BIG_ENDIAN)
// Big-endian hosts: values are already in network byte order, so HTON128 and
// BE16 are identity operations and BIGEND128 simply concatenates the words.
#define BIGEND128(a, b, c, d) ((((uint128_t)(a)) << 3*32) | (((uint128_t)(b)) << 2*32) | (((uint128_t)(c)) << 1*32) | (((uint128_t)(d)) << 0*32))
#define HTON128(a) ((uint128_t)(a))
#define BE16(a) ((uint16_t)(a))
// The big-endian-high 64 bits are the native-high half here.
#define BE128BEHIGH64(val) ((uint64_t)(((uint128_t)(val)) >> 64))
109 #error "Need endian info"
// Prefix-length -> netmask constructors, in network byte order.
// Fixes: (1) macro parameters are parenthesized so operator-bearing arguments
// expand correctly; (2) pfxlen == 0 (and offs == 0) are handled explicitly —
// the original shifted a value by its full width (1 << 32 / 1 << 128), which
// is undefined behavior in C (CERT INT34-C). For all pfxlen >= 1 the
// expansion is value-identical to the original.
#define MASK4(pfxlen) BIGEND32((pfxlen) == 0 ? 0 : ~((((uint32_t)1) << (32 - (pfxlen))) - 1))
#define MASK6(pfxlen) HTON128((pfxlen) == 0 ? (uint128_t)0 : ~((((uint128_t)1) << (128 - (pfxlen))) - 1))
#define MASK6_OFFS(offs, pfxlen) HTON128(((pfxlen) == 0 ? (uint128_t)0 : ~((((uint128_t)1) << (128 - (pfxlen))) - 1)) & ((offs) == 0 ? ~(uint128_t)0 : ((((uint128_t)1) << (128 - (offs))) - 1)))
116 // PARSE is used as a preprocessor flag to indicate parsing fields
// Branch-prediction hints. `!!(a)` normalizes the expression to 0/1 before
// handing it to __builtin_expect (the same form the Linux kernel uses in
// compiler.h): without it, a truthy value other than exactly 1 would not
// match the expected value and the hint would be lost. All uses in this file
// are in boolean `if (...)` contexts, so behavior is unchanged.
#define unlikely(a) __builtin_expect(!!(a), 0)
#define likely(a) __builtin_expect(!!(a), 1)
// Indexes into drop_cnt_map for drops from the static (non-generated) checks.
// These are objects (not enum constants) because INCREMENT_MATCH in BPF mode
// needs to take their address for bpf_map_lookup_elem.
static const uint32_t PKT_LEN_DROP = 0;   // packet too short for a parsed header
static const uint32_t VLAN_DROP = 1;      // VLAN tag missing/unexpected or wrong VLAN ID
static const uint32_t IHL_DROP = 2;       // bogus IPv4 header length
static const uint32_t V6FRAG_DROP = 3;    // IPv6 fragment header when not parsing fragments
// Must stay equal to the number of static drop reasons above.
#define STATIC_RULE_CNT 4
129 #define DO_RETURN(reason, ret) {\
130 if (ret == XDP_DROP) { INCREMENT_MATCH(reason); } \
// Bounds-check that a whole `struct struc` starting at `start` lies inside
// the packet, dropping (and counting) otherwise. Under standard XDP
// semantics data_end points one past the last valid byte, making `>` the
// exact check; the original author observed drop counts suggesting data_end
// is the last byte — either way the comparison below is the one upstream
// documentation uses, and it is left unchanged.
// Wrapped in do { } while (0) so the macro expands to a single statement and
// nests safely under an unbraced if/else at the call site.
#define CHECK_LEN(start, struc) do { \
	if (unlikely((void*)(start) + sizeof(struct struc) > data_end)) DO_RETURN(PKT_LEN_DROP, XDP_DROP); \
} while (0)
140 // 64 bit version of xdp_md for testing
145 /* Below access go through struct xdp_rxq_info */
146 __u64 ingress_ifindex; /* rxq->dev->ifindex */
147 __u64 rx_queue_index; /* rxq->queue_index */
149 __u64 egress_ifindex; /* txq->dev->ifindex */
// Return codes for the userspace TEST build. NOTE(review): these do not use
// the kernel's enum xdp_action numbering (where XDP_DROP is 1 but XDP_PASS
// is 2); the test harness only compares against these local values.
static const int XDP_PASS = 0;
static const int XDP_DROP = 1;
// TEST build: flat per-reason counter array standing in for the BPF
// per-CPU map used in real builds.
static long drop_cnt_map[STATS_RULECNT + STATIC_RULE_CNT];
// NOTE(review): this folds both packet count (+1) and byte count
// (+ data_end - pktdata) into the same slot — presumably the tests only use
// it as a "did this rule match" signal; confirm against the test harness.
#define INCREMENT_MATCH(reason) { drop_cnt_map[reason] += 1; drop_cnt_map[reason] += data_end - pktdata; }
158 #include <linux/bpf.h>
159 #include <bpf/bpf_helpers.h>
161 struct match_counter {
166 __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
167 __uint(max_entries, STATS_RULECNT + STATIC_RULE_CNT);
169 struct match_counter *value;
170 } drop_cnt_map SEC(".maps");
172 #define INCREMENT_MATCH(reason) { \
173 struct match_counter *value = bpf_map_lookup_elem(&drop_cnt_map, &reason); \
175 value->bytes += data_end - pktdata; \
176 value->packets += 1; \
180 // Rate limits are done in a static-sized leaky bucket with a decimal counter
182 // They are stored in a single uint64_t with the top RATE_BUCKET_BITS holding
// the packet count/size and the remaining low bits holding the time (as a
184 // fixed-point decimal).
// Bucket size is always exactly (1 << RATE_BUCKET_INTEGER_BITS)
#define RATE_BUCKET_DECIMAL_BITS 8  /* fractional (fixed-point) packet bits */
#define RATE_BUCKET_INTEGER_BITS 4  /* whole-packet bits -> bucket of 16 */
// Total counter bits stored in the top of pkts_and_time (12 with the
// defaults above); the remaining low 64 - RATE_BUCKET_BITS bits hold the
// masked timestamp.
#define RATE_BUCKET_BITS (RATE_BUCKET_DECIMAL_BITS + RATE_BUCKET_INTEGER_BITS)
#define RATE_TIME_MASK ((1ULL << (64 - RATE_BUCKET_BITS)) - 1)
// Time going backwards 10ms+ or forward 32sec+ implies we should consider it
// an overflow, or at least stale enough that we should reset the entry.
// Both offsets are in nanoseconds (-10ms and +32s respectively), matching
// the masked nanosecond timestamps stored in pkts_and_time.
#define RATE_MIN_TIME_OFFSET -10000000LL
#define RATE_MAX_TIME_OFFSET 32000000000LL
200 struct bpf_spin_lock lock;
201 uint64_t pkts_and_time;
204 __uint(type, BPF_MAP_TYPE_ARRAY);
205 __uint(max_entries, RATE_CNT);
207 struct ratelimit *value;
208 } rate_map SEC(".maps");
209 #endif /* RATE_CNT */
211 // We implement a rather naive hashtable here instead of using a BPF map because
212 // (a) the BPF map hashtables are similarly naive (no rehashing, etc),
213 // (b) the BPF map LRU hashtables don't support locking.
215 // We first separate into a few top-level buckets with per-bucket locks, limiting
216 // us to 2^SRC_HASH_MAX_PARALLELISM parallel accessors.
218 // Then we build an array of MAX_ENTRIES/2**SRC_HASH_MAX_PARALLELISM_POW entries,
219 // which are split into buckets of size SRC_HASH_BUCKET_COUNT. An entry can appear
// in any of the SRC_HASH_BUCKET_COUNT buckets at its hash value.
222 // Because we use buckets of size 16, see collision_prob.py, the number of
223 // elements we can hold with only a 1% probability of overflowing a bucket is:
225 // 128K-entry hash table (2MiB): ~33K sources
226 // 256K-entry hash table (4MiB): ~63K sources
227 // 512K-entry hash table (8MiB): ~119K sources
228 // 1M-entry hash table (16MiB): ~227K sources
// Source-hashtable geometry (see the sizing discussion above).
#define SRC_HASH_MAX_PARALLELISM_POW 8
#define SRC_HASH_MAX_PARALLELISM (1 << SRC_HASH_MAX_PARALLELISM_POW) /* 256 top-level lock buckets */
#define SRC_HASH_BUCKET_COUNT_POW 4
#define SRC_HASH_BUCKET_COUNT (1 << SRC_HASH_BUCKET_COUNT_POW) /* 16 entries scanned per lookup */
234 #define DO_RATE_LIMIT(do_lock, rate, time_masked, amt_in_pkt, limit_ns_per_pkt, matchbool) do { \
237 int64_t bucket_pkts = (rate->pkts_and_time & (~RATE_TIME_MASK)) >> (64 - RATE_BUCKET_BITS); \
238 /* We mask the top 12 bits, so date overflows every 52 days, resetting the counter */ \
239 int64_t time_diff = time_masked - ((int64_t)(rate->pkts_and_time & RATE_TIME_MASK)); \
240 if (unlikely(time_diff < RATE_MIN_TIME_OFFSET || time_diff > RATE_MAX_TIME_OFFSET)) { \
243 if (unlikely(time_diff < 0)) { time_diff = 0; } \
244 /* To avoid storing too many bits, we make a simplifying assumption that all packets */ \
245 /* hit by a rule are the same size. Thus, when a rule is denominated in bytes rather */ \
246 /* than packets, we can keep counting packets and simply adjust the ratelimit by the*/ \
247 /* size of the packet we're looking at. */ \
248 /* Thus, here, we simply reduce our packet counter by the */ \
249 /* time difference / (our ns/packet limit * the size of the current packet). */ \
250 /* We shift by RATE_BUCKET_DECIMAL_BITS first since we're calculating whole packets. */ \
251 int64_t pkts_allowed_since_last_update = \
252 (time_diff << RATE_BUCKET_DECIMAL_BITS) / (((uint64_t)amt_in_pkt) * ((uint64_t)limit_ns_per_pkt)); \
253 bucket_pkts -= pkts_allowed_since_last_update; \
255 /* Accept as long as we can add one to our bucket without overflow */ \
256 const int64_t MAX_PACKETS = (1 << RATE_BUCKET_INTEGER_BITS) - 2; \
257 if (bucket_pkts <= (MAX_PACKETS << RATE_BUCKET_DECIMAL_BITS)) { \
258 if (unlikely(bucket_pkts < 0)) bucket_pkts = 0; \
259 int64_t new_packet_count = bucket_pkts + (1 << RATE_BUCKET_DECIMAL_BITS); \
260 if (new_packet_count < 0) { new_packet_count = 0; } \
261 rate->pkts_and_time = time_masked | (new_packet_count << (64 - RATE_BUCKET_BITS)); \
269 #define CREATE_PERSRC_LOOKUP(LEN, IP_TYPE) \
270 struct persrc_rate_##LEN##_entry { \
271 uint64_t pkts_and_time; \
275 struct persrc_rate_##LEN##_bucket { \
276 struct bpf_spin_lock lock; \
277 struct persrc_rate_##LEN##_entry entries[]; \
280 static int check_persrc_ratelimit_##LEN(IP_TYPE key, void *map, size_t map_limit, int64_t cur_time_masked, uint64_t amt, uint64_t limit_ns_per_pkt) { \
281 uint64_t hash = siphash_##IP_TYPE(key); \
283 const uint32_t map_key = hash % SRC_HASH_MAX_PARALLELISM; \
284 struct persrc_rate_##LEN##_bucket *buckets = bpf_map_lookup_elem(map, &map_key); \
285 if (!buckets) return 0; \
287 hash >>= SRC_HASH_MAX_PARALLELISM_POW; \
288 map_limit >>= SRC_HASH_MAX_PARALLELISM_POW; \
290 struct persrc_rate_##LEN##_entry *first_bucket = &buckets->entries[(hash % map_limit) & (~(SRC_HASH_BUCKET_COUNT - 1))]; \
291 bpf_spin_lock(&buckets->lock); \
293 uint64_t bucket_idx = SRC_HASH_BUCKET_COUNT; \
294 uint64_t min_sent_idx = 0; \
295 uint64_t min_time = UINT64_MAX; \
296 for (uint64_t i = 0; i < SRC_HASH_BUCKET_COUNT; i++) { \
297 if (first_bucket[i].srcip == key) { \
301 int64_t time_offset = ((int64_t)cur_time_masked) - (first_bucket[i].pkts_and_time & RATE_TIME_MASK); \
302 if (time_offset < RATE_MIN_TIME_OFFSET || time_offset > RATE_MAX_TIME_OFFSET) { \
306 if ((first_bucket[i].pkts_and_time & RATE_TIME_MASK) < min_time) { \
307 min_time = first_bucket[i].pkts_and_time & RATE_TIME_MASK; \
311 if (bucket_idx >= SRC_HASH_BUCKET_COUNT) bucket_idx = min_sent_idx; \
312 struct persrc_rate_##LEN##_entry *entry = &first_bucket[bucket_idx]; \
313 if (entry->srcip != key) { \
314 entry->srcip = key; \
315 entry->pkts_and_time = 0; \
318 DO_RATE_LIMIT(, entry, cur_time_masked, amt, limit_ns_per_pkt, matched); \
319 bpf_spin_unlock(&buckets->lock); \
// Instantiate the per-source ratelimit hashtable helpers for both key widths:
CREATE_PERSRC_LOOKUP(128, uint128_t) // full IPv6 source addresses
CREATE_PERSRC_LOOKUP(64, uint64_t) // IPv6 matching no more than a /64 and IPv4
326 #define SRC_RATE_DEFINE(LEN, n, limit) \
327 struct persrc_rate_##LEN##_bucket_##n { \
328 struct bpf_spin_lock lock; \
329 struct persrc_rate_##LEN##_entry entries[limit / SRC_HASH_MAX_PARALLELISM]; \
332 __uint(type, BPF_MAP_TYPE_ARRAY); \
333 __uint(max_entries, SRC_HASH_MAX_PARALLELISM); \
335 struct persrc_rate_##LEN##_bucket_##n *value; \
336 } src_rate_##LEN##_##n SEC(".maps");
340 #ifndef HAVE_WRAPPER // Set this to call xdp_drop externally
342 #endif /* HAVE_WRAPPER */
343 #endif /* not TEST */
344 int xdp_drop_prog(struct xdp_md *ctx)
346 const void *const data_end = (void *)(size_t)ctx->data_end;
348 const void * pktdata;
349 unsigned short eth_proto;
352 // DO_RETURN in CHECK_LEN relies on pktdata being set to calculate packet length.
353 // That said, we don't want to overflow, so just set packet length to 0 here.
355 CHECK_LEN((size_t)ctx->data, ethhdr);
356 const struct ethhdr *const eth = (void*)(size_t)ctx->data;
357 pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr);
359 #if PARSE_8021Q == PARSE
360 if (likely(eth->h_proto == BE16(ETH_P_8021Q))) {
361 CHECK_LEN((size_t)ctx->data, ethhdr_vlan);
362 const struct ethhdr_vlan *const eth_vlan = (void*)(size_t)ctx->data;
363 pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
365 if (unlikely((eth_vlan->tci & BE16(0xfff)) != BE16(REQ_8021Q)))
366 DO_RETURN(VLAN_DROP, XDP_DROP);
368 eth_proto = eth_vlan->h_proto;
370 if (unlikely(eth->h_proto == BE16(ETH_P_8021Q))) {
371 pktdata = (const void *)(long)ctx->data + sizeof(struct ethhdr_vlan);
372 DO_RETURN(VLAN_DROP, PARSE_8021Q);
376 DO_RETURN(VLAN_DROP, XDP_DROP);
378 eth_proto = eth->h_proto;
383 const void *l4hdr = NULL;
384 const struct tcphdr *tcp = NULL;
385 int32_t sport = -1, dport = -1; // Host Endian! Only valid with tcp || udp
388 if (eth_proto == BE16(ETH_P_IP)) {
389 CHECK_LEN(pktdata, iphdr);
390 struct iphdr *ip = (struct iphdr*) pktdata;
392 #if PARSE_IHL == PARSE
393 if (unlikely(ip->ihl < 5)) DO_RETURN(IHL_DROP, XDP_DROP);
394 l4hdr = pktdata + ip->ihl * 4;
396 if (ip->ihl != 5) DO_RETURN(IHL_DROP, PARSE_IHL);
397 l4hdr = pktdata + 5*4;
400 const struct icmphdr *icmp = NULL;
401 if ((ip->frag_off & BE16(IP_OFFSET)) == 0) {
402 if (ip->protocol == IP_PROTO_TCP) {
403 CHECK_LEN(l4hdr, tcphdr);
404 tcp = (struct tcphdr*) l4hdr;
405 sport = BE16(tcp->source);
406 dport = BE16(tcp->dest);
407 } else if (ip->protocol == IP_PROTO_UDP) {
408 CHECK_LEN(l4hdr, udphdr);
409 const struct udphdr *udp = (struct udphdr*) l4hdr;
410 sport = BE16(udp->source);
411 dport = BE16(udp->dest);
412 } else if (ip->protocol == IP_PROTO_ICMP) {
413 CHECK_LEN(l4hdr, icmphdr);
414 icmp = (struct icmphdr*) l4hdr;
422 if (eth_proto == BE16(ETH_P_IPV6)) {
423 CHECK_LEN(pktdata, ip6hdr);
424 struct ip6hdr *ip6 = (struct ip6hdr*) pktdata;
426 l4hdr = pktdata + 40;
428 uint8_t v6nexthdr = ip6->nexthdr;
429 const struct ip6_fraghdr *frag6 = NULL;
431 #if PARSE_V6_FRAG == PARSE
432 if (ip6->nexthdr == IP6_PROTO_FRAG) {
433 CHECK_LEN(l4hdr, ip6_fraghdr);
434 frag6 = (struct ip6_fraghdr*) l4hdr;
435 l4hdr = l4hdr + sizeof(struct ip6_fraghdr);
436 v6nexthdr = frag6->nexthdr;
438 if (unlikely(ip6->nexthdr == IP6_PROTO_FRAG)) {
439 DO_RETURN(V6FRAG_DROP, PARSE_V6_FRAG);
443 // TODO: Handle more options?
445 const struct icmp6hdr *icmpv6 = NULL;
446 if (frag6 == NULL || (frag6->frag_off & BE16(IP6_FRAGOFF)) == 0) {
447 if (v6nexthdr == IP_PROTO_TCP) {
448 CHECK_LEN(l4hdr, tcphdr);
449 tcp = (struct tcphdr*) l4hdr;
450 sport = BE16(tcp->source);
451 dport = BE16(tcp->dest);
452 } else if (v6nexthdr == IP_PROTO_UDP) {
453 CHECK_LEN(l4hdr, udphdr);
454 const struct udphdr *udp = (struct udphdr*) l4hdr;
455 sport = BE16(udp->source);
456 dport = BE16(udp->dest);
457 } else if (v6nexthdr == IP6_PROTO_ICMPV6) {
458 CHECK_LEN(l4hdr, icmp6hdr);
459 icmpv6 = (struct icmp6hdr*) l4hdr;
476 struct xdp_md test = {
478 // -1 because sizeof includes a trailing null in the "string"
479 .data_end = (uint64_t)(d + sizeof(d) - 1),
481 assert(xdp_drop_prog(&test) == TEST_EXP);