/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

struct bpf_insn;

int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
		   int max_entries);
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
int bpf_get_next_key(int fd, void *key, void *next_key);
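
/*
 * Example (sketch): typical use of the map wrappers above.  The map type,
 * element sizes and BPF_ANY flag are illustrative; error handling omitted.
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(__u32),
 *				    sizeof(__u64), 256);
 *	__u32 key = 1;
 *	__u64 value = 0;
 *
 *	bpf_update_elem(map_fd, &key, &value, BPF_ANY);
 *	bpf_lookup_elem(map_fd, &key, &value);
 *	bpf_delete_elem(map_fd, &key);
 */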

int bpf_prog_load(enum bpf_prog_type prog_type,
		  const struct bpf_insn *insns, int insn_len,
		  const char *license, int kern_version);

#define LOG_BUF_SIZE 65536
extern char bpf_log_buf[LOG_BUF_SIZE];
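
/*
 * Example (sketch): loading an instruction array built with the macros
 * below.  insn_len is assumed to be the program size in bytes, which is how
 * the samples pass it; on failure the verifier log is left in bpf_log_buf.
 *
 *	struct bpf_insn prog[] = { ... };
 *	int prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
 *				    sizeof(prog), "GPL", 0);
 *	if (prog_fd < 0)
 *		printf("%s\n", bpf_log_buf);
 */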

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
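
/*
 * Example (sketch): both ALU forms, using the register names from
 * <uapi/linux/bpf.h>.
 *
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),	r2 += r3
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),		r2 += -4
 */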

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
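
/*
 * Example (sketch): the usual prologue/epilogue moves seen in the samples.
 *
 *	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	save ctx (r1) in r6
 *	BPF_MOV64_IMM(BPF_REG_0, 0),		r0 = 0 (return value)
 */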

/* BPF_LD_IMM64 macro encodes a single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD	1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
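
/*
 * Example (sketch): passing a map to a helper.  Note that BPF_LD_MAP_FD
 * expands to two struct bpf_insn entries, so it counts as two instructions
 * in the program array.  map_fd is assumed to come from bpf_create_map().
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd),	r1 = pointer to the map
 */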


/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
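
/*
 * Example (sketch): read the IP protocol byte of a received packet, as in
 * the socket filter samples.  ETH_HLEN and struct iphdr come from
 * <linux/if_ether.h> and <linux/ip.h>; the result lands in R0.
 *
 *	BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol)),
 */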

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })
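
/*
 * Example (sketch): spill a value to the stack (R10 is the read-only frame
 * pointer) and point R2 at it, e.g. to use it as a map key.
 *
 *	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),	*(u32 *)(fp - 4) = r0
 *	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),		r2 = fp - 4
 */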

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })
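
/*
 * Example (sketch): skip the next two instructions when a helper returned
 * NULL.  The offset is relative to the instruction following the jump.
 *
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),		if (r0 == 0) goto pc+2
 */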

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })
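
/*
 * Example (sketch): BPF_RAW_INSN covers encodings without a dedicated macro
 * here, such as a helper call:
 *
 *	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 */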

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })
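
/*
 * Example (sketch): the smallest complete program expressible with these
 * macros, "return 0", suitable for a quick bpf_prog_load() smoke test.
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 */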

/* create RAW socket and bind to interface 'name' */
int open_raw_sock(const char *name);
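
/*
 * Example (sketch): attach a loaded socket filter program to such a socket.
 * SO_ATTACH_BPF takes the program fd returned by bpf_prog_load().
 *
 *	int sock = open_raw_sock("lo");
 *	setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
 *		   sizeof(prog_fd));
 */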

struct perf_event_attr;
int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
		    int group_fd, unsigned long flags);
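
/*
 * Example (sketch): perf_event_open() is a thin syscall wrapper (glibc does
 * not provide one).  In the tracing samples the returned fd is what a BPF
 * program gets attached to, roughly:
 *
 *	int fd = perf_event_open(&attr, -1, 0, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 */
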
#endif