This source file includes following definitions.
- nft_payload_copy_vlan
- nft_payload_eval
- nft_payload_init
- nft_payload_dump
- nft_payload_offload_ll
- nft_payload_offload_ip
- nft_payload_offload_ip6
- nft_payload_offload_nh
- nft_payload_offload_tcp
- nft_payload_offload_udp
- nft_payload_offload_th
- nft_payload_offload
- nft_csum_replace
- nft_payload_udp_checksum
- nft_payload_l4csum_offset
- nft_payload_l4csum_update
- nft_payload_csum_inet
- nft_payload_set_eval
- nft_payload_set_init
- nft_payload_set_dump
- nft_payload_select_ops
1
2
3
4
5
6
7
8
9 #include <linux/kernel.h>
10 #include <linux/if_vlan.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/netlink.h>
14 #include <linux/netfilter.h>
15 #include <linux/netfilter/nf_tables.h>
16 #include <net/netfilter/nf_tables_core.h>
17 #include <net/netfilter/nf_tables.h>
18 #include <net/netfilter/nf_tables_offload.h>
19
20 #include <linux/tcp.h>
21 #include <linux/udp.h>
22 #include <linux/icmpv6.h>
23 #include <linux/ip.h>
24 #include <linux/ipv6.h>
25
26
/* Copy @len bytes starting at @offset of the link-layer header of @skb
 * into @d, reconstructing the 802.1Q tag that hardware acceleration
 * stripped into skb metadata (skb->vlan_proto / the vlan tag), so the
 * caller sees the header as it appeared on the wire.
 *
 * Returns true on success, false if the requested bytes could not be
 * fetched from the skb.
 */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		/* Copy starts inside the ethernet header proper: fetch it
		 * and substitute the offloaded tag's protocol for
		 * h_vlan_proto so veth looks like a tagged frame.
		 */
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;
	} else if (offset >= VLAN_ETH_HLEN) {
		/* Copy lies entirely beyond the (virtual) VLAN tag: the
		 * packet data does not contain the tag, so pull the offset
		 * back by VLAN_HLEN and copy straight from the skb.
		 */
		offset -= VLAN_HLEN;
		goto skip;
	}

	/* Synthesize the 4-byte tag (TCI + encapsulated proto) from the
	 * skb metadata into the template header.
	 */
	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	dst_u8 += vlan_len;
skip:
	/* Remainder (past the reconstructed VLAN ethernet header) comes
	 * directly from the packet data.
	 */
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
72
73 void nft_payload_eval(const struct nft_expr *expr,
74 struct nft_regs *regs,
75 const struct nft_pktinfo *pkt)
76 {
77 const struct nft_payload *priv = nft_expr_priv(expr);
78 const struct sk_buff *skb = pkt->skb;
79 u32 *dest = ®s->data[priv->dreg];
80 int offset;
81
82 dest[priv->len / NFT_REG32_SIZE] = 0;
83 switch (priv->base) {
84 case NFT_PAYLOAD_LL_HEADER:
85 if (!skb_mac_header_was_set(skb))
86 goto err;
87
88 if (skb_vlan_tag_present(skb)) {
89 if (!nft_payload_copy_vlan(dest, skb,
90 priv->offset, priv->len))
91 goto err;
92 return;
93 }
94 offset = skb_mac_header(skb) - skb->data;
95 break;
96 case NFT_PAYLOAD_NETWORK_HEADER:
97 offset = skb_network_offset(skb);
98 break;
99 case NFT_PAYLOAD_TRANSPORT_HEADER:
100 if (!pkt->tprot_set)
101 goto err;
102 offset = pkt->xt.thoff;
103 break;
104 default:
105 BUG();
106 }
107 offset += priv->offset;
108
109 if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
110 goto err;
111 return;
112 err:
113 regs->verdict.code = NFT_BREAK;
114 }
115
/* Netlink attribute policy shared by the payload load and store
 * (mangle) expressions; all attributes are 32-bit big-endian values.
 */
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};
126
127 static int nft_payload_init(const struct nft_ctx *ctx,
128 const struct nft_expr *expr,
129 const struct nlattr * const tb[])
130 {
131 struct nft_payload *priv = nft_expr_priv(expr);
132
133 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
134 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
135 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
136 priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
137
138 return nft_validate_register_store(ctx, priv->dreg, NULL,
139 NFT_DATA_VALUE, priv->len);
140 }
141
142 static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
143 {
144 const struct nft_payload *priv = nft_expr_priv(expr);
145
146 if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
147 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
148 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
149 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
150 goto nla_put_failure;
151 return 0;
152
153 nla_put_failure:
154 return -1;
155 }
156
157 static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
158 struct nft_flow_rule *flow,
159 const struct nft_payload *priv)
160 {
161 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
162
163 switch (priv->offset) {
164 case offsetof(struct ethhdr, h_source):
165 if (priv->len != ETH_ALEN)
166 return -EOPNOTSUPP;
167
168 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
169 src, ETH_ALEN, reg);
170 break;
171 case offsetof(struct ethhdr, h_dest):
172 if (priv->len != ETH_ALEN)
173 return -EOPNOTSUPP;
174
175 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
176 dst, ETH_ALEN, reg);
177 break;
178 default:
179 return -EOPNOTSUPP;
180 }
181
182 return 0;
183 }
184
185 static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
186 struct nft_flow_rule *flow,
187 const struct nft_payload *priv)
188 {
189 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
190
191 switch (priv->offset) {
192 case offsetof(struct iphdr, saddr):
193 if (priv->len != sizeof(struct in_addr))
194 return -EOPNOTSUPP;
195
196 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
197 sizeof(struct in_addr), reg);
198 break;
199 case offsetof(struct iphdr, daddr):
200 if (priv->len != sizeof(struct in_addr))
201 return -EOPNOTSUPP;
202
203 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
204 sizeof(struct in_addr), reg);
205 break;
206 case offsetof(struct iphdr, protocol):
207 if (priv->len != sizeof(__u8))
208 return -EOPNOTSUPP;
209
210 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
211 sizeof(__u8), reg);
212 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
213 break;
214 default:
215 return -EOPNOTSUPP;
216 }
217
218 return 0;
219 }
220
221 static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
222 struct nft_flow_rule *flow,
223 const struct nft_payload *priv)
224 {
225 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
226
227 switch (priv->offset) {
228 case offsetof(struct ipv6hdr, saddr):
229 if (priv->len != sizeof(struct in6_addr))
230 return -EOPNOTSUPP;
231
232 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
233 sizeof(struct in6_addr), reg);
234 break;
235 case offsetof(struct ipv6hdr, daddr):
236 if (priv->len != sizeof(struct in6_addr))
237 return -EOPNOTSUPP;
238
239 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
240 sizeof(struct in6_addr), reg);
241 break;
242 case offsetof(struct ipv6hdr, nexthdr):
243 if (priv->len != sizeof(__u8))
244 return -EOPNOTSUPP;
245
246 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
247 sizeof(__u8), reg);
248 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
249 break;
250 default:
251 return -EOPNOTSUPP;
252 }
253
254 return 0;
255 }
256
257 static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
258 struct nft_flow_rule *flow,
259 const struct nft_payload *priv)
260 {
261 int err;
262
263 switch (ctx->dep.l3num) {
264 case htons(ETH_P_IP):
265 err = nft_payload_offload_ip(ctx, flow, priv);
266 break;
267 case htons(ETH_P_IPV6):
268 err = nft_payload_offload_ip6(ctx, flow, priv);
269 break;
270 default:
271 return -EOPNOTSUPP;
272 }
273
274 return err;
275 }
276
277 static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
278 struct nft_flow_rule *flow,
279 const struct nft_payload *priv)
280 {
281 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
282
283 switch (priv->offset) {
284 case offsetof(struct tcphdr, source):
285 if (priv->len != sizeof(__be16))
286 return -EOPNOTSUPP;
287
288 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
289 sizeof(__be16), reg);
290 break;
291 case offsetof(struct tcphdr, dest):
292 if (priv->len != sizeof(__be16))
293 return -EOPNOTSUPP;
294
295 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
296 sizeof(__be16), reg);
297 break;
298 default:
299 return -EOPNOTSUPP;
300 }
301
302 return 0;
303 }
304
305 static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
306 struct nft_flow_rule *flow,
307 const struct nft_payload *priv)
308 {
309 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
310
311 switch (priv->offset) {
312 case offsetof(struct udphdr, source):
313 if (priv->len != sizeof(__be16))
314 return -EOPNOTSUPP;
315
316 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
317 sizeof(__be16), reg);
318 break;
319 case offsetof(struct udphdr, dest):
320 if (priv->len != sizeof(__be16))
321 return -EOPNOTSUPP;
322
323 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
324 sizeof(__be16), reg);
325 break;
326 default:
327 return -EOPNOTSUPP;
328 }
329
330 return 0;
331 }
332
333 static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
334 struct nft_flow_rule *flow,
335 const struct nft_payload *priv)
336 {
337 int err;
338
339 switch (ctx->dep.protonum) {
340 case IPPROTO_TCP:
341 err = nft_payload_offload_tcp(ctx, flow, priv);
342 break;
343 case IPPROTO_UDP:
344 err = nft_payload_offload_udp(ctx, flow, priv);
345 break;
346 default:
347 return -EOPNOTSUPP;
348 }
349
350 return err;
351 }
352
353 static int nft_payload_offload(struct nft_offload_ctx *ctx,
354 struct nft_flow_rule *flow,
355 const struct nft_expr *expr)
356 {
357 const struct nft_payload *priv = nft_expr_priv(expr);
358 int err;
359
360 switch (priv->base) {
361 case NFT_PAYLOAD_LL_HEADER:
362 err = nft_payload_offload_ll(ctx, flow, priv);
363 break;
364 case NFT_PAYLOAD_NETWORK_HEADER:
365 err = nft_payload_offload_nh(ctx, flow, priv);
366 break;
367 case NFT_PAYLOAD_TRANSPORT_HEADER:
368 err = nft_payload_offload_th(ctx, flow, priv);
369 break;
370 default:
371 err = -EOPNOTSUPP;
372 break;
373 }
374 return err;
375 }
376
/* Generic payload load ops: handles any base/offset/len combination. */
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};
385
/* Fast-path payload load ops, selected by nft_payload_select_ops() for
 * small aligned power-of-two loads outside the link-layer header.
 * Non-static: referenced from outside this file (hence no fast eval
 * here — the core provides the fast path, with nft_payload_eval as the
 * fallback).
 */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};
394
/* Incrementally update the folded 16-bit checksum at *sum: remove the
 * old data's partial sum @fsum and add the new data's partial sum
 * @tsum (RFC 1624-style update). A folded result of zero is stored as
 * CSUM_MANGLED_0 instead — its one's-complement equivalent — so the
 * field never reads as zero on the wire (NOTE(review): presumably to
 * avoid UDP's "checksum disabled" meaning; confirm for other users).
 */
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}
401
402 static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
403 {
404 struct udphdr *uh, _uh;
405
406 uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
407 if (!uh)
408 return false;
409
410 return (__force bool)uh->check;
411 }
412
/* Determine the absolute skb offset of the layer 4 checksum field for
 * the packet's transport protocol. Returns 0 with the offset stored in
 * *l4csum_offset, or -1 when the protocol is unsupported or the UDP
 * checksum is disabled (zero) and there is nothing to update.
 */
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		/* A zero UDP checksum means "not used": skip the update. */
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		/* fall through - checksum field sits at the same offset */
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	/* Field offsets above are relative to the transport header. */
	*l4csum_offset += pkt->xt.thoff;
	return 0;
}
438
/* Incrementally fix up the layer 4 checksum of @skb after payload data
 * whose partial sum was @fsum has been replaced by data summing @tsum.
 *
 * Returns 0 on success (including when no L4 checksum update is needed)
 * and -1 when the checksum could not be read or written back.
 */
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* No known checksum location for this protocol, or UDP with its
	 * checksum disabled: nothing to do, report success.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* For CHECKSUM_PARTIAL the stored value is adjusted without the
	 * final fold/complement normalization of nft_csum_replace() —
	 * NOTE(review): presumably because the device completes the
	 * checksum later; confirm against inet_proto_csum_replace*().
	 * Otherwise fix up the final checksum, keeping skb->csum in sync
	 * when the skb carries a CHECKSUM_COMPLETE value.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
475
/* Update the 16-bit internet checksum at @csum_offset in @skb after the
 * covered data changed from partial sum @fsum to @tsum.
 * Note: @src is unused here; it mirrors the caller's signature.
 * Returns 0 on success, -1 if the field could not be read or written.
 */
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
491
492 static void nft_payload_set_eval(const struct nft_expr *expr,
493 struct nft_regs *regs,
494 const struct nft_pktinfo *pkt)
495 {
496 const struct nft_payload_set *priv = nft_expr_priv(expr);
497 struct sk_buff *skb = pkt->skb;
498 const u32 *src = ®s->data[priv->sreg];
499 int offset, csum_offset;
500 __wsum fsum, tsum;
501
502 switch (priv->base) {
503 case NFT_PAYLOAD_LL_HEADER:
504 if (!skb_mac_header_was_set(skb))
505 goto err;
506 offset = skb_mac_header(skb) - skb->data;
507 break;
508 case NFT_PAYLOAD_NETWORK_HEADER:
509 offset = skb_network_offset(skb);
510 break;
511 case NFT_PAYLOAD_TRANSPORT_HEADER:
512 if (!pkt->tprot_set)
513 goto err;
514 offset = pkt->xt.thoff;
515 break;
516 default:
517 BUG();
518 }
519
520 csum_offset = offset + priv->csum_offset;
521 offset += priv->offset;
522
523 if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
524 (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
525 skb->ip_summed != CHECKSUM_PARTIAL)) {
526 fsum = skb_checksum(skb, offset, priv->len, 0);
527 tsum = csum_partial(src, priv->len, 0);
528
529 if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
530 nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
531 goto err;
532
533 if (priv->csum_flags &&
534 nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
535 goto err;
536 }
537
538 if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
539 skb_store_bits(skb, offset, src, priv->len) < 0)
540 goto err;
541
542 return;
543 err:
544 regs->verdict.code = NFT_BREAK;
545 }
546
/* Parse the netlink attributes of a payload store (mangle) expression
 * and validate the source register against priv->len. Mandatory
 * attributes were already checked by nft_payload_select_ops().
 */
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	/* Checksum attributes are optional; when absent the fields keep
	 * their prior values (presumably zero-initialized expr data, i.e.
	 * NFT_PAYLOAD_CSUM_NONE — TODO confirm against expr allocation).
	 */
	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		/* Only the pseudo-header flag is understood. */
		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	/* Only "no checksum" and internet checksum types are supported. */
	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}
584
585 static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
586 {
587 const struct nft_payload_set *priv = nft_expr_priv(expr);
588
589 if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
590 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
591 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
592 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
593 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
594 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
595 htonl(priv->csum_offset)) ||
596 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
597 goto nla_put_failure;
598 return 0;
599
600 nla_put_failure:
601 return -1;
602 }
603
/* Payload store (mangle) ops; no hardware offload for writes. */
static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};
611
612 static const struct nft_expr_ops *
613 nft_payload_select_ops(const struct nft_ctx *ctx,
614 const struct nlattr * const tb[])
615 {
616 enum nft_payload_bases base;
617 unsigned int offset, len;
618
619 if (tb[NFTA_PAYLOAD_BASE] == NULL ||
620 tb[NFTA_PAYLOAD_OFFSET] == NULL ||
621 tb[NFTA_PAYLOAD_LEN] == NULL)
622 return ERR_PTR(-EINVAL);
623
624 base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
625 switch (base) {
626 case NFT_PAYLOAD_LL_HEADER:
627 case NFT_PAYLOAD_NETWORK_HEADER:
628 case NFT_PAYLOAD_TRANSPORT_HEADER:
629 break;
630 default:
631 return ERR_PTR(-EOPNOTSUPP);
632 }
633
634 if (tb[NFTA_PAYLOAD_SREG] != NULL) {
635 if (tb[NFTA_PAYLOAD_DREG] != NULL)
636 return ERR_PTR(-EINVAL);
637 return &nft_payload_set_ops;
638 }
639
640 if (tb[NFTA_PAYLOAD_DREG] == NULL)
641 return ERR_PTR(-EINVAL);
642
643 offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
644 len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
645
646 if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
647 base != NFT_PAYLOAD_LL_HEADER)
648 return &nft_payload_fast_ops;
649 else
650 return &nft_payload_ops;
651 }
652
/* Registration record for the "payload" expression type; the concrete
 * ops (load / fast load / store) are chosen by nft_payload_select_ops().
 */
struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};