root/drivers/net/ethernet/netronome/nfp/bpf/offload.c

DEFINITIONS

This source file includes the following definitions:
  1. nfp_map_ptr_record
  2. nfp_map_ptrs_forget
  3. nfp_map_ptrs_record
  4. nfp_prog_prepare
  5. nfp_prog_free
  6. nfp_bpf_verifier_prep
  7. nfp_bpf_translate
  8. nfp_bpf_destroy
  9. nfp_map_bpf_byte_swap
  10. nfp_map_bpf_byte_swap_record
  11. nfp_bpf_map_lookup_entry
  12. nfp_bpf_map_update_entry
  13. nfp_bpf_map_get_next_key
  14. nfp_bpf_map_delete_elem
  15. nfp_bpf_map_alloc
  16. nfp_bpf_map_free
  17. nfp_ndo_bpf
  18. nfp_bpf_perf_event_copy
  19. nfp_bpf_event_output
  20. nfp_net_bpf_load
  21. nfp_net_bpf_start
  22. nfp_net_bpf_stop
  23. nfp_net_bpf_offload

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

/*
 * bpf/offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#define pr_fmt(fmt)     "NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
                   struct bpf_map *map)
{
        struct nfp_bpf_neutral_map *record;
        int err;

        /* Reuse path - another offloaded program is already tracking this map. */
        record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
                                        nfp_bpf_maps_neutral_params);
        if (record) {
                nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
                record->count++;
                return 0;
        }

        /* Grab a single ref to the map for our record.  The prog destroy ndo
         * happens after free_used_maps().
         */
        map = bpf_map_inc(map, false);
        if (IS_ERR(map))
                return PTR_ERR(map);

        record = kmalloc(sizeof(*record), GFP_KERNEL);
        if (!record) {
                err = -ENOMEM;
                goto err_map_put;
        }

        record->ptr = map;
        record->map_id = map->id;
        record->count = 1;

        err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
                                     nfp_bpf_maps_neutral_params);
        if (err)
                goto err_free_rec;

        nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

        return 0;

err_free_rec:
        kfree(record);
err_map_put:
        bpf_map_put(map);
        return err;
}

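/* nfp_map_ptr_record() above is a lookup-or-insert with reference counting:
 * the first program to use a map takes one bpf_map reference and publishes a
 * record; later programs only bump record->count.  A minimal, self-contained
 * sketch of that pattern follows -- illustrative only, every name in it is
 * hypothetical and not part of this driver.
 */
#if 0
#include <stdlib.h>

struct demo_rec {
        unsigned int id;
        unsigned int count;
};

/* Return the existing record (count bumped) or a new one with count == 1. */
static struct demo_rec *demo_get(struct demo_rec **slot, unsigned int id)
{
        if (*slot) {                            /* reuse path: bump the count */
                (*slot)->count++;
                return *slot;
        }
        *slot = calloc(1, sizeof(**slot));      /* first user: record owns one ref */
        if (*slot) {
                (*slot)->id = id;
                (*slot)->count = 1;
        }
        return *slot;
}
#endif
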
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
        bool freed = false;
        int i;

        for (i = 0; i < nfp_prog->map_records_cnt; i++) {
                if (--nfp_prog->map_records[i]->count) {
                        nfp_prog->map_records[i] = NULL;
                        continue;
                }

                WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
                                               &nfp_prog->map_records[i]->l,
                                               nfp_bpf_maps_neutral_params));
                freed = true;
        }

        if (freed) {
                synchronize_rcu();

                for (i = 0; i < nfp_prog->map_records_cnt; i++)
                        if (nfp_prog->map_records[i]) {
                                bpf_map_put(nfp_prog->map_records[i]->ptr);
                                kfree(nfp_prog->map_records[i]);
                        }
        }

        kfree(nfp_prog->map_records);
        nfp_prog->map_records = NULL;
        nfp_prog->map_records_cnt = 0;
}

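/* nfp_map_ptrs_forget() unlinks every record whose count dropped to zero
 * first, waits out a single RCU grace period covering all of them, and only
 * then drops the map references and frees the records.  Batching the removals
 * ahead of one synchronize_rcu() amortizes the grace-period wait.  The
 * ordering requirement, sketched in the abstract (illustrative only,
 * hypothetical names):
 */
#if 0
        hash_del_rcu(&obj->node);       /* 1: unpublish from RCU readers   */
        synchronize_rcu();              /* 2: wait for readers to drain    */
        kfree(obj);                     /* 3: no reader can reach obj now  */
#endif
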
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
                    struct bpf_prog *prog)
{
        int i, cnt, err;

        /* Quickly count the maps we will have to remember */
        cnt = 0;
        for (i = 0; i < prog->aux->used_map_cnt; i++)
                if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
                        cnt++;
        if (!cnt)
                return 0;

        nfp_prog->map_records = kmalloc_array(cnt,
                                              sizeof(nfp_prog->map_records[0]),
                                              GFP_KERNEL);
        if (!nfp_prog->map_records)
                return -ENOMEM;

        for (i = 0; i < prog->aux->used_map_cnt; i++)
                if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
                        err = nfp_map_ptr_record(bpf, nfp_prog,
                                                 prog->aux->used_maps[i]);
                        if (err) {
                                nfp_map_ptrs_forget(bpf, nfp_prog);
                                return err;
                        }
                }
        WARN_ON(cnt != nfp_prog->map_records_cnt);

        return 0;
}

static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
                 unsigned int cnt)
{
        struct nfp_insn_meta *meta;
        unsigned int i;

        for (i = 0; i < cnt; i++) {
                meta = kzalloc(sizeof(*meta), GFP_KERNEL);
                if (!meta)
                        return -ENOMEM;

                meta->insn = prog[i];
                meta->n = i;
                if (is_mbpf_alu(meta)) {
                        meta->umin_src = U64_MAX;
                        meta->umin_dst = U64_MAX;
                }

                list_add_tail(&meta->l, &nfp_prog->insns);
        }
        nfp_prog->n_insns = cnt;

        nfp_bpf_jit_prepare(nfp_prog);

        return 0;
}

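/* nfp_prog_prepare() gives every BPF instruction its own nfp_insn_meta node
 * on nfp_prog->insns.  The umin_src/umin_dst fields start at U64_MAX (with
 * the umax fields at zero from kzalloc()) so that per-path operand bounds
 * reported by the verifier can later be folded in as min()/max() accumulators.
 * Later passes walk the list roughly like this (illustrative pattern only;
 * process() is a hypothetical stand-in):
 */
#if 0
        struct nfp_insn_meta *meta;

        list_for_each_entry(meta, &nfp_prog->insns, l)
                process(meta);
#endif
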
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta, *tmp;

        kfree(nfp_prog->subprog);

        list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
                list_del(&meta->l);
                kfree(meta);
        }
        kfree(nfp_prog);
}

static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog;
        int ret;

        nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
        if (!nfp_prog)
                return -ENOMEM;
        prog->aux->offload->dev_priv = nfp_prog;

        INIT_LIST_HEAD(&nfp_prog->insns);
        nfp_prog->type = prog->type;
        nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);

        ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
        if (ret)
                goto err_free;

        nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);

        return 0;

err_free:
        nfp_prog_free(nfp_prog);

        return ret;
}

static int nfp_bpf_translate(struct bpf_prog *prog)
{
        struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int max_instr;
        int err;

        /* We depend on dead code elimination succeeding */
        if (prog->aux->offload->opt_failed)
                return -EINVAL;

        max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
        nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

        nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
        if (!nfp_prog->prog)
                return -ENOMEM;

        err = nfp_bpf_jit(nfp_prog);
        if (err)
                return err;

        prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
        prog->aux->offload->jited_image = nfp_prog->prog;

        return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

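/* Sizing note for nfp_bpf_translate(): NFP_NET_CFG_BPF_MAX_LEN is the FW
 * limit in 64-bit instruction words, so the output buffer is allocated for
 * the worst case up front.  If the FW reported e.g. 4096 words (an example
 * value, not a quote), that is 4096 * sizeof(u64) == 32 KiB; after the JIT,
 * jited_len records the actual size, prog_len * 8 bytes.
 */
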
static void nfp_bpf_destroy(struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

        kvfree(nfp_prog->prog);
        nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
        nfp_prog_free(nfp_prog);
}

/* The atomic engine requires values to be in big endian, so we need to byte
 * swap the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
        u32 *word = value;
        unsigned int i;

        for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
                if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
                        word[i] = (__force u32)cpu_to_be32(word[i]);
}

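/* Worked example for nfp_map_bpf_byte_swap(): an 8-byte value is two u32
 * words; if only word 0 is used with xadd, then on a little-endian host
 * word[0] == 0x00000001 is rewritten to cpu_to_be32(0x00000001) ==
 * 0x01000000, while word[1] is left untouched.  On a big-endian host the
 * swap is a no-op.
 */
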
/* Mark the value as unsafely initialized in case it is later used atomically
 * and we did not byte swap something that is not byte-swap neutral.
 */
static void
nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
{
        u32 *word = value;
        unsigned int i;

        for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
                if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
                    word[i] != (__force u32)cpu_to_be32(word[i]))
                        nfp_map->use_map[i].non_zero_update = 1;
}

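/* "Byte swap neutral" above means word == cpu_to_be32(word), i.e. the four
 * bytes form a palindrome: 0x00000000, 0xFFFFFFFF or 0xAABBBBAA qualify,
 * 0x00000001 does not.  Writing a non-neutral value into a word whose use is
 * still NFP_MAP_UNUSED sets non_zero_update, so a later attempt to treat that
 * word as an atomic counter can be refused.
 */
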
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
                         void *key, void *value)
{
        int err;

        err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
        if (err)
                return err;

        nfp_map_bpf_byte_swap(offmap->dev_priv, value);
        return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
                         void *key, void *value, u64 flags)
{
        nfp_map_bpf_byte_swap(offmap->dev_priv, value);
        nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
        return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
                         void *key, void *next_key)
{
        if (!key)
                return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
        return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
        if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
                return -EINVAL;
        return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
        .map_get_next_key       = nfp_bpf_map_get_next_key,
        .map_lookup_elem        = nfp_bpf_map_lookup_entry,
        .map_update_elem        = nfp_bpf_map_update_entry,
        .map_delete_elem        = nfp_bpf_map_delete_elem,
};

static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
        struct nfp_bpf_map *nfp_map;
        unsigned int use_map_size;
        long long int res;

        if (!bpf->maps.types)
                return -EOPNOTSUPP;

        if (offmap->map.map_flags ||
            offmap->map.numa_node != NUMA_NO_NODE) {
                pr_info("map flags are not supported\n");
                return -EINVAL;
        }

        if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
                pr_info("map type not supported\n");
                return -EOPNOTSUPP;
        }
        if (bpf->maps.max_maps == bpf->maps_in_use) {
                pr_info("too many maps for a device\n");
                return -ENOMEM;
        }
        if (bpf->maps.max_elems - bpf->map_elems_in_use <
            offmap->map.max_entries) {
                pr_info("map with too many elements: %u, left: %u\n",
                        offmap->map.max_entries,
                        bpf->maps.max_elems - bpf->map_elems_in_use);
                return -ENOMEM;
        }

        if (round_up(offmap->map.key_size, 8) +
            round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
                pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
                        round_up(offmap->map.key_size, 8) +
                        round_up(offmap->map.value_size, 8),
                        bpf->maps.max_elem_sz);
                return -ENOMEM;
        }
        if (offmap->map.key_size > bpf->maps.max_key_sz) {
                pr_info("map key size %u, FW max is %u\n",
                        offmap->map.key_size, bpf->maps.max_key_sz);
                return -ENOMEM;
        }
        if (offmap->map.value_size > bpf->maps.max_val_sz) {
                pr_info("map value size %u, FW max is %u\n",
                        offmap->map.value_size, bpf->maps.max_val_sz);
                return -ENOMEM;
        }

        use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
                       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);

        nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
        if (!nfp_map)
                return -ENOMEM;

        offmap->dev_priv = nfp_map;
        nfp_map->offmap = offmap;
        nfp_map->bpf = bpf;
        spin_lock_init(&nfp_map->cache_lock);

        res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
        if (res < 0) {
                kfree(nfp_map);
                return res;
        }

        nfp_map->tid = res;
        offmap->dev_ops = &nfp_bpf_map_ops;
        bpf->maps_in_use++;
        bpf->map_elems_in_use += offmap->map.max_entries;
        list_add_tail(&nfp_map->l, &bpf->map_list);

        return 0;
}

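/* Worked instance of the element-size check above: for key_size == 5 and
 * value_size == 12 (example inputs, not quotes), the FW element takes
 * round_up(5, 8) + round_up(12, 8) == 8 + 16 == 24 bytes, which must not
 * exceed bpf->maps.max_elem_sz on top of the individual key and value caps.
 */
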
static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
        struct nfp_bpf_map *nfp_map = offmap->dev_priv;

        nfp_bpf_ctrl_free_map(bpf, nfp_map);
        dev_consume_skb_any(nfp_map->cache);
        WARN_ON_ONCE(nfp_map->cache_blockers);
        list_del_init(&nfp_map->l);
        bpf->map_elems_in_use -= offmap->map.max_entries;
        bpf->maps_in_use--;
        kfree(nfp_map);

        return 0;
}

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
        switch (bpf->command) {
        case BPF_OFFLOAD_MAP_ALLOC:
                return nfp_bpf_map_alloc(app->priv, bpf->offmap);
        case BPF_OFFLOAD_MAP_FREE:
                return nfp_bpf_map_free(app->priv, bpf->offmap);
        default:
                return -EINVAL;
        }
}

static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
                        unsigned long off, unsigned long len)
{
        memcpy(dst, src + off, len);
        return 0;
}

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
                         unsigned int len)
{
        struct cmsg_bpf_event *cbe = (void *)data;
        struct nfp_bpf_neutral_map *record;
        u32 pkt_size, data_size, map_id;
        u64 map_id_full;

        if (len < sizeof(struct cmsg_bpf_event))
                return -EINVAL;

        pkt_size = be32_to_cpu(cbe->pkt_size);
        data_size = be32_to_cpu(cbe->data_size);
        map_id_full = be64_to_cpu(cbe->map_ptr);
        map_id = map_id_full;

        if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
                return -EINVAL;
        if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
                return -EINVAL;

        rcu_read_lock();
        record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
                                        nfp_bpf_maps_neutral_params);
        if (!record || map_id_full > U32_MAX) {
                rcu_read_unlock();
                cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
                          map_id_full, map_id_full);
                return -EINVAL;
        }

        bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
                         &cbe->data[round_up(pkt_size, 4)], data_size,
                         cbe->data, pkt_size, nfp_bpf_perf_event_copy);
        rcu_read_unlock();

        return 0;
}

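/* Layout of the control message parsed by nfp_bpf_event_output() above, as
 * implied by its bounds checks and the bpf_event_output() call:
 *
 *      offset 0                          struct cmsg_bpf_event header
 *      cbe->data[0]                      pkt_size bytes of packet contents
 *      cbe->data[round_up(pkt_size, 4)]  data_size bytes of perf event data
 *
 * e.g. pkt_size == 61 places the event data at cbe->data[64].
 */
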
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
                 struct netlink_ext_ack *extack)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
        dma_addr_t dma_addr;
        void *img;
        int err;

        fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
        if (fw_mtu < pkt_off) {
                NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
                return -EOPNOTSUPP;
        }

        max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
        if (nfp_prog->stack_size > max_stack) {
                NL_SET_ERR_MSG_MOD(extack, "stack too large");
                return -EOPNOTSUPP;
        }

        max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
        if (nfp_prog->prog_len > max_prog_len) {
                NL_SET_ERR_MSG_MOD(extack, "program too long");
                return -EOPNOTSUPP;
        }

        img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
        if (IS_ERR(img))
                return PTR_ERR(img);

        dma_addr = dma_map_single(nn->dp.dev, img,
                                  nfp_prog->prog_len * sizeof(u64),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(nn->dp.dev, dma_addr)) {
                kfree(img);
                return -ENOMEM;
        }

        nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
        nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                NL_SET_ERR_MSG_MOD(extack,
                                   "FW command error while loading BPF");

        dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
                         DMA_TO_DEVICE);
        kfree(img);

        return err;
}

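/* The fw_mtu arithmetic in nfp_net_bpf_load() reads the inline-data MTU in
 * 64-byte units: a register value of 32 (an example, not a quote) gives
 * 32 * 64 - 32 == 2016 bytes the program may access before the HW packet
 * split boundary.  Note the image is streamed to the device through a
 * one-shot DMA mapping that is unmapped whether or not the reconfig
 * succeeded.
 */
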
static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
        int err;

        /* Enable passing packets through the BPF function */
        nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                NL_SET_ERR_MSG_MOD(extack,
                                   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
        if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;

        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
                        bool old_prog, struct netlink_ext_ack *extack)
{
        int err;

        if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
                return -EINVAL;

        if (prog && old_prog) {
                u8 cap;

                cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
                if (!(cap & NFP_NET_BPF_CAP_RELO)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "FW does not support live reload");
                        return -EBUSY;
                }
        }

        /* Something else is loaded, different program type? */
        if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                return -EBUSY;

        if (old_prog && !prog)
                return nfp_net_bpf_stop(nn);

        err = nfp_net_bpf_load(nn, prog, extack);
        if (err)
                return err;

        if (!old_prog)
                nfp_net_bpf_start(nn, extack);

        return 0;
}

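/* The (old_prog, prog) combinations nfp_net_bpf_offload() handles:
 *
 *      old_prog  prog   action
 *      false     true   load the image, then start (set CTRL_BPF)
 *      true      false  stop: clear CTRL_BPF and reconfig
 *      true      true   live reload (requires NFP_NET_BPF_CAP_RELO):
 *                       load only, BPF stays enabled throughout
 *      false     false  filtered out by callers before this helper runs
 */
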
const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
        .insn_hook      = nfp_verify_insn,
        .finalize       = nfp_bpf_finalize,
        .replace_insn   = nfp_bpf_opt_replace_insn,
        .remove_insns   = nfp_bpf_opt_remove_insns,
        .prepare        = nfp_bpf_verifier_prep,
        .translate      = nfp_bpf_translate,
        .destroy        = nfp_bpf_destroy,
};
