X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=genrules.py;h=10f138f277cb2657931488743f0f3f3aa81f2390;hb=9257387510c6760e67d28d24fe520e6ef9cd59d4;hp=3f901e1c908a1595e35264279016e01e37874c7c;hpb=80e92801d22e22493f9f0505a8ed06e6ca95716d;p=flowspec-xdp

diff --git a/genrules.py b/genrules.py
index 3f901e1..10f138f 100755
--- a/genrules.py
+++ b/genrules.py
@@ -294,7 +294,7 @@ with open("rules.h", "w") as out:
     rules6 = ""
     rules4 = ""
     use_v6_frags = False
-    rulecnt = 0
+    stats_rulecnt = 0
     ratelimitcnt = 0
     v4persrcratelimits = []
     v5persrcratelimits = []
@@ -320,9 +320,19 @@ with open("rules.h", "w") as out:
         else:
             continue
 
-        def write_rule(r):
-            global rules4, rules6
-            if proto == 6:
+        # LLVM can be pretty bad at optimizing out common subexpressions. Ideally we'd optimize
+        # by pulling common subexpressions in back-to-back rules out into a single check, but
+        # that's a bunch of work that LLVM really should do for us. Instead, we blindly guess
+        # that source-address is the least likely to be a common subexpression and rely on LLVM
+        # managing to pull out common subexpressions as long as they're the first check(s). By
+        # placing source-address checks last, LLVM should do at least some work for us.
+        # See https://bugs.llvm.org/show_bug.cgi?id=52455
+        last_checks = ""
+        def write_rule(r, place_at_end=False):
+            global rules4, rules6, last_checks
+            if place_at_end:
+                last_checks += "\t\t" + r.replace("\n", " \\\n\t\t") + " \\\n"
+            elif proto == 6:
                 rules6 += "\t\t" + r.replace("\n", " \\\n\t\t") + " \\\n"
             else:
                 rules4 += "\t\t" + r.replace("\n", " \\\n\t\t") + " \\\n"
@@ -337,7 +347,7 @@ with open("rules.h", "w") as out:
             else:
                 offset = None
             if step.strip().startswith("src"):
-                write_rule(ip_to_rule(proto, nets[0], "saddr", offset))
+                write_rule(ip_to_rule(proto, nets[0], "saddr", offset), True)
             else:
                 write_rule(ip_to_rule(proto, nets[0], "daddr", offset))
         elif step.strip().startswith("proto") and proto == 4:
@@ -367,9 +377,14 @@ with open("rules.h", "w") as out:
             else:
                 assert False
 
+        if proto == 6:
+            rules6 += last_checks
+        else:
+            rules4 += last_checks
+
         # Now write the match handling!
         first_action = None
-        stats_action = None
+        stats_action = ""
         last_action = None
         for community in line.split("("):
             if not community.startswith("generic, "):
@@ -406,68 +421,46 @@ with open("rules.h", "w") as out:
                     first_action = "int64_t time_masked = bpf_ktime_get_ns() & RATE_TIME_MASK;\n"
                     first_action += f"int64_t per_pkt_ns = (1000000000LL << RATE_BUCKET_INTEGER_BITS) / {math.floor(value)};\n"
+                    if ty == "0x8006" or ty == "0x8306":
+                        first_action += "uint64_t amt = data_end - pktdata;\n"
+                    else:
+                        first_action += "uint64_t amt = 1;\n"
                     if ty == "0x8006" or ty == "0x800c":
-                        spin_lock = "bpf_spin_lock(&rate->lock);"
-                        spin_unlock = "bpf_spin_unlock(&rate->lock);"
                         first_action += f"const uint32_t ratelimitidx = {ratelimitcnt};\n"
                         first_action += "struct ratelimit *rate = bpf_map_lookup_elem(&rate_map, &ratelimitidx);\n"
                         ratelimitcnt += 1
+                        first_action += "int matched = 0;\n"
+                        first_action += "DO_RATE_LIMIT(bpf_spin_lock(&rate->lock), rate, time_masked, amt, per_pkt_ns, matched);\n"
+                        first_action += "if (rate) { bpf_spin_unlock(&rate->lock); }\n"
                     else:
-                        spin_lock = "/* No locking as we're locked in get_v*_persrc_ratelimit */"
-                        spin_unlock = "bpf_spin_unlock(rate_ptr.lock);"
                         if proto == 4:
                             if mid_byte > 32:
                                 continue
                             first_action += f"const uint32_t srcip = ip->saddr & MASK4({mid_byte});\n"
                             first_action += f"void *rate_map = &v4_src_rate_{len(v4persrcratelimits)};\n"
-                            first_action += f"struct persrc_rate4_ptr rate_ptr = get_v4_persrc_ratelimit(srcip, rate_map, {(high_byte + 1) * 4096}, time_masked);\n"
-                            first_action += f"struct persrc_rate4_entry *rate = rate_ptr.rate;\n"
+                            first_action += f"int matched = check_v4_persrc_ratelimit(srcip, rate_map, {(high_byte + 1) * 4096}, time_masked, amt, per_pkt_ns);\n"
                             v4persrcratelimits.append((high_byte + 1) * 4096)
                         elif mid_byte <= 64:
                             first_action += f"const uint64_t srcip = BE128BEHIGH64(ip6->saddr & MASK6({mid_byte}));\n"
                             first_action += f"void *rate_map = &v5_src_rate_{len(v5persrcratelimits)};\n"
-                            first_action += f"struct persrc_rate5_ptr rate_ptr = get_v5_persrc_ratelimit(srcip, rate_map, {(high_byte + 1) * 4096}, time_masked);\n"
-                            first_action += f"struct persrc_rate5_entry *rate = rate_ptr.rate;\n"
+                            first_action += f"int matched = check_v5_persrc_ratelimit(srcip, rate_map, {(high_byte + 1) * 4096}, time_masked, amt, per_pkt_ns);\n"
                             v5persrcratelimits.append((high_byte + 1) * 4096)
                         else:
                             if mid_byte > 128:
                                 continue
                             first_action += f"const uint128_t srcip = ip6->saddr & MASK6({mid_byte});\n"
                             first_action += f"void *rate_map = &v6_src_rate_{len(v6persrcratelimits)};\n"
-                            first_action += f"struct persrc_rate6_ptr rate_ptr = get_v6_persrc_ratelimit(srcip, rate_map, {(high_byte + 1) * 4096}, time_masked);\n"
-                            first_action += f"struct persrc_rate6_entry *rate = rate_ptr.rate;\n"
+                            first_action += f"int matched = check_v6_persrc_ratelimit(srcip, rate_map, {(high_byte + 1) * 4096}, time_masked, amt, per_pkt_ns);\n"
                             v6persrcratelimits.append((high_byte + 1) * 4096)
-                    if ty == "0x8006" or ty == "0x8306":
-                        first_action += "uint64_t amt = data_end - pktdata;\n"
-                    else:
-                        first_action += "uint64_t amt = 1;\n"
-                    first_action += "if (rate) {\n"
-                    first_action += f"\t{spin_lock}\n"
-                    first_action += "\tint64_t bucket_pkts = (rate->sent_time & (~RATE_TIME_MASK)) >> (64 - RATE_BUCKET_BITS);\n"
-                    # We mask the top 12 bits, so date overflows every 52 days, handled below
-                    first_action += "\tint64_t time_diff = time_masked - ((int64_t)(rate->sent_time & RATE_TIME_MASK));\n"
-                    first_action += "\tif (unlikely(time_diff < -1000000000 || time_diff > 16000000000)) {\n"
-                    first_action += "\t\tbucket_pkts = 0;\n"
-                    first_action += "\t} else {\n"
-                    first_action += "\t\tif (unlikely(time_diff < 0)) { time_diff = 0; }\n"
-                    first_action += f"\t\tint64_t pkts_since_last = (time_diff << RATE_BUCKET_BITS) * amt / per_pkt_ns;\n"
-                    first_action += "\t\tbucket_pkts -= pkts_since_last;\n"
-                    first_action += "\t}\n"
-                    first_action += "\tif (bucket_pkts >= (((1 << RATE_BUCKET_INTEGER_BITS) - 1) << RATE_BUCKET_DECIMAL_BITS)) {\n"
-                    first_action += f"\t\t{spin_unlock}\n"
-                    first_action += "\t\t{stats_replace}\n"
-                    first_action += "\t\treturn XDP_DROP;\n"
-                    first_action += "\t} else {\n"
-                    first_action += "\t\tif (unlikely(bucket_pkts < 0)) bucket_pkts = 0;\n"
-                    first_action += f"\t\trate->sent_time = time_masked | ((bucket_pkts + (1 << RATE_BUCKET_DECIMAL_BITS)) << (64 - RATE_BUCKET_BITS));\n"
-                    first_action += f"\t\t{spin_unlock}\n"
-                    first_action += "\t}\n"
+                    first_action += "if (matched) {\n"
+                    first_action += "\t{stats_replace}\n"
+                    first_action += "\treturn XDP_DROP;\n"
                     first_action += "}\n"
             elif ty == "0x8007":
                 if low_bytes & 1 == 0:
                     last_action = "return XDP_PASS;"
                 if low_bytes & 2 == 2:
-                    stats_action = f"const uint32_t ruleidx = STATIC_RULE_CNT + {rulecnt};\n"
+                    stats_action = f"const uint32_t ruleidx = STATIC_RULE_CNT + {stats_rulecnt};\n"
                     stats_action += "INCREMENT_MATCH(ruleidx);"
             elif ty == "0x8008":
                 assert False # We do not implement the redirect action
@@ -487,7 +480,7 @@ with open("rules.h", "w") as out:
                     write_rule("ip6->flow_lbl[0] = (ip6->flow_lbl[0] & 0x3f) | " + str((low_bytes & 3) << 6) + ";")
         if first_action is not None:
             write_rule(first_action.replace("{stats_replace}", stats_action))
-        if stats_action is not None and (first_action is None or "{stats_replace}" not in first_action):
+        if stats_action != "" and (first_action is None or "{stats_replace}" not in first_action):
             write_rule(stats_action)
         if last_action is not None:
             write_rule(last_action)
@@ -495,11 +488,13 @@ with open("rules.h", "w") as out:
             rules6 += "\t} while(0);\\\n"
         else:
             rules4 += "\t} while(0);\\\n"
-        rulecnt += 1
+        if stats_action != "":
+            print(rule)
+            stats_rulecnt += 1
         lastrule = None
 
     out.write("\n")
-    out.write(f"#define RULECNT {rulecnt}\n")
+    out.write(f"#define STATS_RULECNT {stats_rulecnt}\n")
     if ratelimitcnt != 0:
         out.write(f"#define RATE_CNT {ratelimitcnt}\n")
     if rules4 != "":