/* tools/testing/selftests/bpf/verifier/ref_tracking.c */

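/*
 * Verifier test cases for socket reference tracking.  Each block below is a
 * struct bpf_test initializer; the test_verifier.c harness #includes the
 * files under verifier/ into one big array and runs them.
 *
 * BPF_SK_LOOKUP(func) is a harness macro (defined in test_verifier.c), not a
 * single instruction: roughly, it zeroes a struct bpf_sock_tuple on the
 * stack and emits a call to the named lookup helper, e.g.
 * bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), 0, 0), so after the macro
 * R0 holds either a referenced socket pointer or NULL and R1-R5 are
 * clobbered as after any helper call.
 */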
{
        "reference tracking: leak potential reference",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
},
{
        "reference tracking: leak potential reference to sock_common",
        .insns = {
        BPF_SK_LOOKUP(skc_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
},
{
        "reference tracking: leak potential reference on stack",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
},
{
        "reference tracking: leak potential reference on stack 2",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
},
{
        "reference tracking: zero potential reference",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
},
{
        "reference tracking: zero potential reference to sock_common",
        .insns = {
        BPF_SK_LOOKUP(skc_lookup_tcp),
        BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
},
{
        "reference tracking: copy and zero potential references",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
},
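/*
 * bpf_sk_lookup_tcp()/bpf_skc_lookup_tcp() return a *_or_null pointer, while
 * bpf_sk_release() expects a NULL-checked socket pointer.  Releasing without
 * checking first must therefore be rejected, which is what the
 * "expected=sock" error strings below assert.
 */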
{
        "reference tracking: release reference without check",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        /* reference in r0 may be NULL */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_2, 0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "type=sock_or_null expected=sock",
        .result = REJECT,
},
{
        "reference tracking: release reference to sock_common without check",
        .insns = {
        BPF_SK_LOOKUP(skc_lookup_tcp),
        /* reference in r0 may be NULL */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_2, 0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "type=sock_common_or_null expected=sock",
        .result = REJECT,
},
{
        "reference tracking: release reference",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
{
        "reference tracking: release reference to sock_common",
        .insns = {
        BPF_SK_LOOKUP(skc_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
{
        "reference tracking: release reference 2",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
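/*
 * bpf_sk_release() invalidates every copy of the released pointer, so the
 * stale copy kept in R6 below is no longer a socket when the second release
 * runs; the verifier reports it as "type=inv expected=sock".
 */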
{
        "reference tracking: release reference twice",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "type=inv expected=sock",
        .result = REJECT,
},
{
        "reference tracking: release reference twice inside branch",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "type=inv expected=sock",
        .result = REJECT,
},
{
        "reference tracking: alloc, check, free in one subbranch",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct __sk_buff, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct __sk_buff, data_end)),
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
        /* if (offsetof(skb, mark) > data_len) exit; */
        BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
                    offsetof(struct __sk_buff, mark)),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
        /* Leak reference in R0 */
        BPF_EXIT_INSN(),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
        "reference tracking: alloc, check, free in both subbranches",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct __sk_buff, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct __sk_buff, data_end)),
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
        /* if (offsetof(skb, mark) > data_len) exit; */
        BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
                    offsetof(struct __sk_buff, mark)),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
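/*
 * The "in call" tests below use BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, off)
 * as a bpf-to-bpf call: src_reg = 1 marks BPF_PSEUDO_CALL and the immediate
 * is the callee's offset relative to the following instruction.  References
 * may be acquired in one frame and released in another, but every path must
 * still release them, as these tests check.
 */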
{
        "reference tracking in call: free reference in subprog",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),

        /* subprog 1 */
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
{
        "reference tracking in call: free reference in subprog and outside",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),

        /* subprog 1 */
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "type=inv expected=sock",
        .result = REJECT,
},
{
        "reference tracking in call: alloc & leak reference in subprog",
        .insns = {
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),

        /* subprog 1 */
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        /* spill unchecked sk_ptr into stack of caller */
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
},
{
        "reference tracking in call: alloc in subprog, release outside",
        .insns = {
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),

        /* subprog 1 */
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_EXIT_INSN(), /* return sk */
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .retval = POINTER_VALUE,
        .result = ACCEPT,
},
{
        "reference tracking in call: sk_ptr leak into caller stack",
        .insns = {
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),

        /* subprog 1 */
        BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
        /* spill unchecked sk_ptr into stack of caller */
        BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
        BPF_EXIT_INSN(),

        /* subprog 2 */
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "Unreleased reference",
        .result = REJECT,
},
{
        "reference tracking in call: sk_ptr spill into caller stack",
        .insns = {
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),

        /* subprog 1 */
        BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
        /* spill unchecked sk_ptr into stack of caller */
        BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        /* now the sk_ptr is verified, free the reference */
        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),

        /* subprog 2 */
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
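/*
 * Classic BPF_LD_[ABS|IND] packet loads may terminate the program directly
 * on an out-of-range access, bypassing any pending bpf_sk_release(), so the
 * verifier only allows them while no socket reference is held.
 */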
{
        "reference tracking: allow LD_ABS",
        .insns = {
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_LD_ABS(BPF_B, 0),
        BPF_LD_ABS(BPF_H, 0),
        BPF_LD_ABS(BPF_W, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
{
        "reference tracking: forbid LD_ABS while holding reference",
        .insns = {
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_LD_ABS(BPF_B, 0),
        BPF_LD_ABS(BPF_H, 0),
        BPF_LD_ABS(BPF_W, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
        .result = REJECT,
},
{
        "reference tracking: allow LD_IND",
        .insns = {
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_MOV64_IMM(BPF_REG_7, 1),
        BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
        .retval = 1,
},
{
        "reference tracking: forbid LD_IND while holding reference",
        .insns = {
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_7, 1),
        BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
        .result = REJECT,
},
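/*
 * bpf_tail_call() does not return to the caller on success, so any socket
 * reference still held across it would leak.  .fixup_prog1 tells the harness
 * which instruction to patch with a prog-array map fd; note that the index
 * counts real instructions, i.e. after BPF_SK_LOOKUP() has been expanded.
 */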
{
        "reference tracking: check reference or tail call",
        .insns = {
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        /* if (sk) bpf_sk_release() */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
        /* bpf_tail_call() */
        BPF_MOV64_IMM(BPF_REG_3, 2),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .fixup_prog1 = { 17 },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
{
        "reference tracking: release reference then tail call",
        .insns = {
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        /* if (sk) bpf_sk_release() */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        /* bpf_tail_call() */
        BPF_MOV64_IMM(BPF_REG_3, 2),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .fixup_prog1 = { 18 },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
{
        "reference tracking: leak possible reference over tail call",
        .insns = {
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
        /* Look up socket and store in REG_6 */
        BPF_SK_LOOKUP(sk_lookup_tcp),
        /* bpf_tail_call() */
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_3, 2),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        /* if (sk) bpf_sk_release() */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .fixup_prog1 = { 16 },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "tail_call would lead to reference leak",
        .result = REJECT,
},
{
        "reference tracking: leak checked reference over tail call",
        .insns = {
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
        /* Look up socket and store in REG_6 */
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        /* if (!sk) goto end */
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
        /* bpf_tail_call() */
        BPF_MOV64_IMM(BPF_REG_3, 0),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .fixup_prog1 = { 17 },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "tail_call would lead to reference leak",
        .result = REJECT,
},
{
        "reference tracking: mangle and release sock_or_null",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
        .result = REJECT,
},
{
        "reference tracking: mangle and release sock",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "R1 pointer arithmetic on sock prohibited",
        .result = REJECT,
},
{
        "reference tracking: access member",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
{
        "reference tracking: write to member",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_LD_IMM64(BPF_REG_2, 42),
        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
                    offsetof(struct bpf_sock, mark)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_LD_IMM64(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "cannot write into sock",
        .result = REJECT,
},
{
        "reference tracking: invalid 64-bit access of member",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "invalid sock access off=0 size=8",
        .result = REJECT,
},
{
        "reference tracking: access after release",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .errstr = "!read_ok",
        .result = REJECT,
},
{
        "reference tracking: direct access for lookup",
        .insns = {
        /* Check that the packet is at least 64B long */
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct __sk_buff, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct __sk_buff, data_end)),
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
        BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
        /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
        BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
        BPF_MOV64_IMM(BPF_REG_4, 0),
        BPF_MOV64_IMM(BPF_REG_5, 0),
        BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
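/*
 * Pointers obtained from a referenced socket via bpf_tcp_sock() or
 * bpf_sk_fullsock() are only usable while that reference is held; after
 * bpf_sk_release() the loads below are "invalid mem access".  The
 * bpf_get_listener_sock() case is the ACCEPTed exception, and releasing a
 * pointer that was never acquired (the listener socket) is rejected with
 * "reference has not been acquired before".
 */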
{
        "reference tracking: use ptr from bpf_tcp_sock() after release",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
        .errstr = "invalid mem access",
},
{
        "reference tracking: use ptr from bpf_sk_fullsock() after release",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
        .errstr = "invalid mem access",
},
{
        "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
        .errstr = "invalid mem access",
},
{
        "reference tracking: use sk after bpf_sk_release(tp)",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
        .errstr = "invalid mem access",
},
{
        "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
},
{
        "reference tracking: bpf_sk_release(listen_sk)",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
        .errstr = "reference has not been acquired before",
},
{
        /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
        "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
        .insns = {
        BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
        BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
        .errstr = "invalid mem access",
},
