tools/testing/selftests/bpf/verifier/raw_stack.c

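/* R6 points at an 8-byte stack slot and all bpf_skb_load_bytes() arguments
 * are prepared, but the call itself is omitted, so the following 8-byte
 * read hits uninitialized stack and must be rejected.
 */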
{
        "raw_stack: no skb_load_bytes",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 8),
        /* Call to skb_load_bytes() omitted. */
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "invalid read from stack off -8+0 size 8",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
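/* The length argument in R4 is -8; a negative size must be rejected at
 * verification time, before the helper could ever be called.
 */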
{
        "raw_stack: skb_load_bytes, negative len",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, -8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "R4 min value is negative",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
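/* Same check with the length written as ~0, i.e. -1. */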
{
        "raw_stack: skb_load_bytes, negative len 2",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, ~0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "R4 min value is negative",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
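/* A zero-length access through the stack buffer in R3 is rejected as well. */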
{
        "raw_stack: skb_load_bytes, zero len",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "invalid stack type R3",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
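/* The stack buffer handed to the helper does not need to be initialized
 * first: the helper fills it, so reading it back afterwards is valid.
 */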
{
        "raw_stack: skb_load_bytes, no init",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
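/* Pre-initializing the buffer before the call is accepted as well. */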
{
        "raw_stack: skb_load_bytes, init",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
        BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
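/* The skb pointer from R1 is spilled directly below (fp-24) and above
 * (fp-8) the 8-byte buffer the helper writes to (fp-16..fp-9); both
 * spilled pointers survive the call and remain usable as ctx pointers.
 */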
{
        "raw_stack: skb_load_bytes, spilled regs around bounds",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
                    offsetof(struct __sk_buff, mark)),
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
                    offsetof(struct __sk_buff, priority)),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
        BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
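/* Here the helper's write lands exactly on the slot the skb pointer was
 * spilled to (fp-8), so reloading that slot and dereferencing it as a
 * pointer must be rejected.
 */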
{
        "raw_stack: skb_load_bytes, spilled regs corruption",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
                    offsetof(struct __sk_buff, mark)),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "R0 invalid mem access 'inv'",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
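/* Three skb pointer spills at fp-24, fp-16 and fp-8; the helper overwrites
 * only the middle one. The two untouched spills may still be dereferenced,
 * but the value reloaded from the clobbered slot into R3 may not.
 */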
{
        "raw_stack: skb_load_bytes, spilled regs corruption 2",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
                    offsetof(struct __sk_buff, mark)),
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
                    offsetof(struct __sk_buff, priority)),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
                    offsetof(struct __sk_buff, pkt_type)),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "R3 invalid mem access 'inv'",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
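/* Same layout as above, but the value reloaded from the clobbered slot is
 * only used as scalar data (added into R0) and never dereferenced, which
 * is fine.
 */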
{
        "raw_stack: skb_load_bytes, spilled regs + data",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
                    offsetof(struct __sk_buff, mark)),
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
                    offsetof(struct __sk_buff, priority)),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
        BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
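/* fp-513 is one byte beyond the 512-byte stack, so the buffer itself is
 * out of bounds.
 */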
{
        "raw_stack: skb_load_bytes, invalid access 1",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "invalid stack type R3 off=-513 access_size=8",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
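/* An 8-byte buffer starting at fp-1 would extend past the top of the
 * stack frame.
 */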
{
        "raw_stack: skb_load_bytes, invalid access 2",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 8),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "invalid stack type R3 off=-1 access_size=8",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
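/* 0xffffffff in the 32-bit imm field becomes -1, so the length in R4 is
 * again negative.
 */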
{
        "raw_stack: skb_load_bytes, invalid access 3",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "R4 min value is negative",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
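/* A length of 0x7fffffff is positive but far too large for any stack
 * buffer and is rejected as an unbounded access.
 */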
{
        "raw_stack: skb_load_bytes, invalid access 4",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
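/* Same oversized length, now with the buffer at the lowest valid stack
 * offset (fp-512); still rejected.
 */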
{
        "raw_stack: skb_load_bytes, invalid access 5",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
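/* A zero-length access is rejected even when the buffer at fp-512 is
 * otherwise valid.
 */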
{
        "raw_stack: skb_load_bytes, invalid access 6",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
        .errstr = "invalid stack type R3 off=-512 access_size=0",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
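/* A 512-byte read filling the entire stack window (fp-512..fp-1) is
 * accepted.
 */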
{
        "raw_stack: skb_load_bytes, large access",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_2, 4),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_MOV64_IMM(BPF_REG_4, 512),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
        BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
