arch/arm64/net/bpf_jit.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#include <asm/insn.h>

/* 5-bit Register Operand */
#define A64_R(x)        AARCH64_INSN_REG_##x
#define A64_FP          AARCH64_INSN_REG_FP
#define A64_LR          AARCH64_INSN_REG_LR
#define A64_ZR          AARCH64_INSN_REG_ZR
#define A64_SP          AARCH64_INSN_REG_SP

#define A64_VARIANT(sf) \
        ((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)

/* Compare & branch (immediate) */
#define A64_COMP_BRANCH(sf, Rt, offset, type) \
        aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
                AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)

/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
        aarch64_insn_gen_cond_branch_imm(0, offset, cond)
#define A64_COND_EQ     AARCH64_INSN_COND_EQ /* == */
#define A64_COND_NE     AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS     AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI     AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_LS     AARCH64_INSN_COND_LS /* unsigned <= */
#define A64_COND_CC     AARCH64_INSN_COND_CC /* unsigned < */
#define A64_COND_GE     AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT     AARCH64_INSN_COND_GT /* signed > */
#define A64_COND_LE     AARCH64_INSN_COND_LE /* signed <= */
#define A64_COND_LT     AARCH64_INSN_COND_LT /* signed < */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
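
/*
 * Usage sketch: a conditional BPF jump is typically lowered to a compare
 * followed by a conditional branch. emit(), ctx, dst, src and jmp_offset
 * stand in for the JIT's own helpers and state (see bpf_jit_comp.c); note
 * that imm19 counts instructions, not bytes (the macro shifts it by 2):
 *
 *      emit(A64_CMP(1, dst, src), ctx);             // Xdst - Xsrc, set flags
 *      emit(A64_B_(A64_COND_EQ, jmp_offset), ctx);  // branch if equal
 */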

/* Unconditional branch (immediate) */
#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
        AARCH64_INSN_BRANCH_##type)
#define A64_B(imm26)  A64_BRANCH((imm26) << 2, NOLINK)
#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)

/* Unconditional branch (register) */
#define A64_BR(Rn)  aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_NOLINK)
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)

/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
        aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
                AARCH64_INSN_SIZE_##size, \
                AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
#define A64_STRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
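
/*
 * Only the register-offset addressing form is wrapped here, so loads and
 * stores with a constant displacement first move the displacement into a
 * scratch register. A sketch, with emit() and the mov-immediate helper
 * standing in for the JIT's own routines and tmp for a scratch register:
 *
 *      emit_a64_mov_i(1, tmp, off, ctx);      // tmp = off
 *      emit(A64_LDR32(dst, src, tmp), ctx);   // Wdst = *(u32 *)(Xsrc + tmp)
 */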

/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
        aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
                AARCH64_INSN_VARIANT_64BIT, \
                AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn)  A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
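
/*
 * Typical use: a prologue saves the frame pointer and link register with the
 * pre-indexed store pair and sets up the frame, and the matching epilogue
 * restores them with the post-indexed load pair. Sketch (emit()/ctx stand in
 * for the JIT's helpers):
 *
 *      emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); // stp x29, x30, [sp, #-16]!
 *      emit(A64_MOV(1, A64_FP, A64_SP), ctx);       // mov x29, sp
 *      ...
 *      emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);  // ldp x29, x30, [sp], #16
 */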

/* Load/store exclusive */
#define A64_SIZE(sf) \
        ((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32)
#define A64_LSX(sf, Rt, Rn, Rs, type) \
        aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
                                       AARCH64_INSN_LDST_##type)
/* Rt = [Rn]; (atomic) */
#define A64_LDXR(sf, Rt, Rn) \
        A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX)
/* [Rn] = Rt; (atomic) Rs = [state] */
#define A64_STXR(sf, Rt, Rn, Rs) \
        A64_LSX(sf, Rt, Rn, Rs, STORE_EX)

/* LSE atomics */
#define A64_STADD(sf, Rn, Rs) \
        aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
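
/*
 * Sketch of an atomic "*(u64 *)dst += src" as used for BPF's atomic add:
 * without LSE atomics it is a load/store-exclusive retry loop, with LSE a
 * single STADD suffices. tmp and tmp2 are hypothetical scratch registers
 * and the backward branch target is the LDXR, three instructions back:
 *
 *      emit(A64_LDXR(1, tmp, dst), ctx);         // tmp = *dst (exclusive)
 *      emit(A64_ADD(1, tmp, tmp, src), ctx);     // tmp += src
 *      emit(A64_STXR(1, tmp, dst, tmp2), ctx);   // try store, tmp2 = status
 *      emit(A64_CBNZ(0, tmp2, -3), ctx);         // retry if the store failed
 *
 *      emit(A64_STADD(1, dst, src), ctx);        // LSE: one atomic add
 */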

/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
        aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
                A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
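
/*
 * imm12 is an unsigned 12-bit immediate (0..4095), so a sub-4K stack frame
 * can be carved out in one instruction; larger constants must be built in a
 * register first. Encoding the register move as "ADD Rd, Rn, #0" also lets
 * it read and write SP. Sketch (emit()/ctx/stack_size are placeholders):
 *
 *      emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); // sp -= stack_size
 *      emit(A64_MOV(1, A64_FP, A64_SP), ctx);               // mov x29, sp
 */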

/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
        aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
                A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)

/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({   \
        int sz = (sf) ? 64 : 32;        \
        A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
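
/*
 * The constant left shift is the UBFM alias: immr = (-shift) mod size and
 * imms = size - 1 - shift. For example, A64_LSL(1, Rd, Rn, 8) generates
 * UBFM Xd, Xn, #56, #55, which disassembles as "lsl xd, xn, #8"; LSR and
 * ASR map onto the unsigned and signed bitfield moves in the same way.
 */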

/* Zero extend */
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)

/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
        aarch64_insn_gen_movewide(Rd, imm16, shift, \
                A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
 * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
 * Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
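
/*
 * A 64-bit constant is normally materialized as one MOVZ followed by up to
 * three MOVKs, 16 bits at a time (a real mov-immediate helper would also
 * skip all-zero chunks). Sketch for 0x1122334455667788, with emit()/ctx/reg
 * as placeholders:
 *
 *      emit(A64_MOVZ(1, reg, 0x7788, 0), ctx);
 *      emit(A64_MOVK(1, reg, 0x5566, 16), ctx);
 *      emit(A64_MOVK(1, reg, 0x3344, 32), ctx);
 *      emit(A64_MOVK(1, reg, 0x1122, 48), ctx);
 */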

/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
        aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
                A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)

/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
        A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn)     A64_DATA1(1, Rd, Rn, REVERSE_64)
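
/*
 * The REV* helpers back BPF's byte-swap operations. A 16-bit swap also has
 * to clear the upper bits explicitly, e.g. (emit()/ctx/dst as placeholders):
 *
 *      emit(A64_REV16(0, dst, dst), ctx);  // byte-swap the low halfword
 *      emit(A64_UXTH(0, dst, dst), ctx);   // clear bits 16..31
 */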

/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
        A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)

/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
        A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Ra - Rn * Rm */
#define A64_MSUB(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
        A64_VARIANT(sf), AARCH64_INSN_DATA3_MSUB)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
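
/*
 * arm64 has no modulo instruction, so a remainder is computed from a divide
 * and a multiply-subtract. Sketch, with tmp a hypothetical scratch register:
 *
 *      emit(A64_UDIV(1, tmp, dst, src), ctx);       // tmp = dst / src
 *      emit(A64_MSUB(1, dst, dst, tmp, src), ctx);  // dst = dst - tmp * src
 */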

/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
        aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
                A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
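
/*
 * TST pairs with the conditional branches above: a "jump if (dst & src) != 0"
 * test (BPF_JSET) can be lowered as (emit()/ctx/jmp_offset as placeholders):
 *
 *      emit(A64_TST(1, dst, src), ctx);             // dst & src, set flags
 *      emit(A64_B_(A64_COND_NE, jmp_offset), ctx);  // taken if any bit set
 */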

#endif /* _BPF_JIT_H */
