drivers/net/ethernet/netronome/nfp/flower/offload.c


DEFINITIONS

This source file includes the following definitions.
  1. nfp_flower_xmit_flow
  2. nfp_flower_check_higher_than_mac
  3. nfp_flower_check_higher_than_l3
  4. nfp_flower_calc_opt_layer
  5. nfp_flower_calc_udp_tun_layer
  6. nfp_flower_calculate_key_layers
  7. nfp_flower_allocate_new
  8. nfp_flower_update_merge_with_actions
  9. nfp_flower_populate_merge_match
  10. nfp_flower_can_merge
  11. nfp_flower_copy_pre_actions
  12. nfp_fl_verify_post_tun_acts
  13. nfp_fl_push_vlan_after_tun
  14. nfp_flower_merge_action
  15. nfp_flower_unlink_flow
  16. nfp_flower_unlink_flows
  17. nfp_flower_link_flows
  18. nfp_flower_merge_offloaded_flows
  19. nfp_flower_validate_pre_tun_rule
  20. nfp_flower_add_offload
  21. nfp_flower_remove_merge_flow
  22. nfp_flower_del_linked_merge_flows
  23. nfp_flower_del_offload
  24. __nfp_flower_update_merge_stats
  25. nfp_flower_update_merge_stats
  26. nfp_flower_get_stats
  27. nfp_flower_repr_offload
  28. nfp_flower_setup_tc_block_cb
  29. nfp_flower_setup_tc_block
  30. nfp_flower_setup_tc
  31. nfp_flower_indr_block_cb_priv_lookup
  32. nfp_flower_setup_indr_block_cb
  33. nfp_flower_setup_indr_tc_release
  34. nfp_flower_setup_indr_tc_block
  35. nfp_flower_indr_setup_tc_cb
  36. nfp_flower_reg_indir_block_handler

   1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
   3 
   4 #include <linux/skbuff.h>
   5 #include <net/devlink.h>
   6 #include <net/pkt_cls.h>
   7 
   8 #include "cmsg.h"
   9 #include "main.h"
  10 #include "../nfpcore/nfp_cpp.h"
  11 #include "../nfpcore/nfp_nsp.h"
  12 #include "../nfp_app.h"
  13 #include "../nfp_main.h"
  14 #include "../nfp_net.h"
  15 #include "../nfp_port.h"
  16 
  17 #define NFP_FLOWER_SUPPORTED_TCPFLAGS \
  18         (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
  19          TCPHDR_PSH | TCPHDR_URG)
  20 
  21 #define NFP_FLOWER_SUPPORTED_CTLFLAGS \
  22         (FLOW_DIS_IS_FRAGMENT | \
  23          FLOW_DIS_FIRST_FRAG)
  24 
  25 #define NFP_FLOWER_WHITELIST_DISSECTOR \
  26         (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
  27          BIT(FLOW_DISSECTOR_KEY_BASIC) | \
  28          BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
  29          BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
  30          BIT(FLOW_DISSECTOR_KEY_TCP) | \
  31          BIT(FLOW_DISSECTOR_KEY_PORTS) | \
  32          BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
  33          BIT(FLOW_DISSECTOR_KEY_VLAN) | \
  34          BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
  35          BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
  36          BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
  37          BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
  38          BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
  39          BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
  40          BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
  41          BIT(FLOW_DISSECTOR_KEY_MPLS) | \
  42          BIT(FLOW_DISSECTOR_KEY_IP))
  43 
  44 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
  45         (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
  46          BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
  47          BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
  48          BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
  49          BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
  50          BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
  51          BIT(FLOW_DISSECTOR_KEY_ENC_IP))
  52 
  53 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
  54         (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
  55          BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
  56 
  57 #define NFP_FLOWER_MERGE_FIELDS \
  58         (NFP_FLOWER_LAYER_PORT | \
  59          NFP_FLOWER_LAYER_MAC | \
  60          NFP_FLOWER_LAYER_TP | \
  61          NFP_FLOWER_LAYER_IPV4 | \
  62          NFP_FLOWER_LAYER_IPV6)
  63 
  64 #define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
  65         (NFP_FLOWER_LAYER_PORT | \
  66          NFP_FLOWER_LAYER_MAC | \
  67          NFP_FLOWER_LAYER_IPV4)
  68 
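      /* Scratch structure used when checking whether two flows may be merged.
       * The anonymous struct gives field-wise access to the accumulated masks
       * (TCI, L2, L4 and IPv4/IPv6 headers) while vals[] overlays the same
       * storage so nfp_flower_can_merge() can compare two masks with the
       * kernel bitmap helpers.
       */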
  69 struct nfp_flower_merge_check {
  70         union {
  71                 struct {
  72                         __be16 tci;
  73                         struct nfp_flower_mac_mpls l2;
  74                         struct nfp_flower_tp_ports l4;
  75                         union {
  76                                 struct nfp_flower_ipv4 ipv4;
  77                                 struct nfp_flower_ipv6 ipv6;
  78                         };
  79                 };
  80                 unsigned long vals[8];
  81         };
  82 };
  83 
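      /* Serialise a flow into a single control message: rule metadata followed
       * by the unmasked key, the mask and the action list. Length fields in
       * the metadata are converted to firmware long-word units before the
       * message is built and restored to bytes afterwards.
       */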
  84 static int
  85 nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
  86                      u8 mtype)
  87 {
  88         u32 meta_len, key_len, mask_len, act_len, tot_len;
  89         struct sk_buff *skb;
  90         unsigned char *msg;
  91 
   92         meta_len = sizeof(struct nfp_fl_rule_metadata);
  93         key_len = nfp_flow->meta.key_len;
  94         mask_len = nfp_flow->meta.mask_len;
  95         act_len = nfp_flow->meta.act_len;
  96 
  97         tot_len = meta_len + key_len + mask_len + act_len;
  98 
  99         /* Convert to long words as firmware expects
 100          * lengths in units of NFP_FL_LW_SIZ.
 101          */
 102         nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
 103         nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
 104         nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
 105 
 106         skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
 107         if (!skb)
 108                 return -ENOMEM;
 109 
 110         msg = nfp_flower_cmsg_get_data(skb);
 111         memcpy(msg, &nfp_flow->meta, meta_len);
 112         memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
 113         memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
 114         memcpy(&msg[meta_len + key_len + mask_len],
 115                nfp_flow->action_data, act_len);
 116 
 117         /* Convert back to bytes as software expects
 118          * lengths in units of bytes.
 119          */
 120         nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
 121         nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
 122         nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
 123 
 124         nfp_ctrl_tx(app->ctrl, skb);
 125 
 126         return 0;
 127 }
 128 
 129 static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
 130 {
 131         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 132 
 133         return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
 134                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
 135                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
 136                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
 137 }
 138 
 139 static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
 140 {
 141         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 142 
 143         return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
 144                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
 145 }
 146 
 147 static int
 148 nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
 149                           u32 *key_layer_two, int *key_size,
 150                           struct netlink_ext_ack *extack)
 151 {
 152         if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
 153                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
 154                 return -EOPNOTSUPP;
 155         }
 156 
 157         if (enc_opts->len > 0) {
 158                 *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
 159                 *key_size += sizeof(struct nfp_flower_geneve_options);
 160         }
 161 
 162         return 0;
 163 }
 164 
 165 static int
 166 nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
 167                               struct flow_dissector_key_enc_opts *enc_op,
 168                               u32 *key_layer_two, u8 *key_layer, int *key_size,
 169                               struct nfp_flower_priv *priv,
 170                               enum nfp_flower_tun_type *tun_type,
 171                               struct netlink_ext_ack *extack)
 172 {
 173         int err;
 174 
 175         switch (enc_ports->dst) {
 176         case htons(IANA_VXLAN_UDP_PORT):
 177                 *tun_type = NFP_FL_TUNNEL_VXLAN;
 178                 *key_layer |= NFP_FLOWER_LAYER_VXLAN;
 179                 *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
 180 
 181                 if (enc_op) {
 182                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
 183                         return -EOPNOTSUPP;
 184                 }
 185                 break;
 186         case htons(GENEVE_UDP_PORT):
 187                 if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
 188                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
 189                         return -EOPNOTSUPP;
 190                 }
 191                 *tun_type = NFP_FL_TUNNEL_GENEVE;
 192                 *key_layer |= NFP_FLOWER_LAYER_EXT_META;
 193                 *key_size += sizeof(struct nfp_flower_ext_meta);
 194                 *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
 195                 *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
 196 
 197                 if (!enc_op)
 198                         break;
 199                 if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
 200                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
 201                         return -EOPNOTSUPP;
 202                 }
 203                 err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
 204                                                 key_size, extack);
 205                 if (err)
 206                         return err;
 207                 break;
 208         default:
 209                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
 210                 return -EOPNOTSUPP;
 211         }
 212 
 213         return 0;
 214 }
 215 
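      /* Walk the TC dissector keys of a classifier rule, validate them against
       * firmware capabilities and accumulate the key layer bitmaps and total
       * key size used to allocate the flow payload. For example, a rule
       * matching on IPv4 addresses and TCP ports ends up with at least the
       * PORT, IPV4 and TP layers set and a key_size covering the corresponding
       * nfp_flower_* structures.
       */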
 216 static int
 217 nfp_flower_calculate_key_layers(struct nfp_app *app,
 218                                 struct net_device *netdev,
 219                                 struct nfp_fl_key_ls *ret_key_ls,
 220                                 struct flow_cls_offload *flow,
 221                                 enum nfp_flower_tun_type *tun_type,
 222                                 struct netlink_ext_ack *extack)
 223 {
 224         struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
 225         struct flow_dissector *dissector = rule->match.dissector;
 226         struct flow_match_basic basic = { NULL, NULL};
 227         struct nfp_flower_priv *priv = app->priv;
 228         u32 key_layer_two;
 229         u8 key_layer;
 230         int key_size;
 231         int err;
 232 
 233         if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
 234                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
 235                 return -EOPNOTSUPP;
 236         }
 237 
 238         /* If any tun dissector is used then the required set must be used. */
 239         if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
 240             (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
 241             != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
 242                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
 243                 return -EOPNOTSUPP;
 244         }
 245 
 246         key_layer_two = 0;
 247         key_layer = NFP_FLOWER_LAYER_PORT;
 248         key_size = sizeof(struct nfp_flower_meta_tci) +
 249                    sizeof(struct nfp_flower_in_port);
 250 
 251         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
 252             flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
 253                 key_layer |= NFP_FLOWER_LAYER_MAC;
 254                 key_size += sizeof(struct nfp_flower_mac_mpls);
 255         }
 256 
 257         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
 258                 struct flow_match_vlan vlan;
 259 
 260                 flow_rule_match_vlan(rule, &vlan);
 261                 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
 262                     vlan.key->vlan_priority) {
 263                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
 264                         return -EOPNOTSUPP;
 265                 }
 266         }
 267 
 268         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
 269                 struct flow_match_enc_opts enc_op = { NULL, NULL };
 270                 struct flow_match_ipv4_addrs ipv4_addrs;
 271                 struct flow_match_control enc_ctl;
 272                 struct flow_match_ports enc_ports;
 273 
 274                 flow_rule_match_enc_control(rule, &enc_ctl);
 275 
 276                 if (enc_ctl.mask->addr_type != 0xffff) {
 277                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
 278                         return -EOPNOTSUPP;
 279                 }
 280                 if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
 281                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
 282                         return -EOPNOTSUPP;
 283                 }
 284 
 285                 /* These fields are already verified as used. */
 286                 flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
 287                 if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
 288                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
 289                         return -EOPNOTSUPP;
 290                 }
 291 
 292                 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
 293                         flow_rule_match_enc_opts(rule, &enc_op);
 294 
 295 
 296                 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
 297                         /* check if GRE, which has no enc_ports */
 298                         if (netif_is_gretap(netdev)) {
 299                                 *tun_type = NFP_FL_TUNNEL_GRE;
 300                                 key_layer |= NFP_FLOWER_LAYER_EXT_META;
 301                                 key_size += sizeof(struct nfp_flower_ext_meta);
 302                                 key_layer_two |= NFP_FLOWER_LAYER2_GRE;
 303                                 key_size +=
 304                                         sizeof(struct nfp_flower_ipv4_gre_tun);
 305 
 306                                 if (enc_op.key) {
 307                                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
 308                                         return -EOPNOTSUPP;
 309                                 }
 310                         } else {
 311                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
 312                                 return -EOPNOTSUPP;
 313                         }
 314                 } else {
 315                         flow_rule_match_enc_ports(rule, &enc_ports);
 316                         if (enc_ports.mask->dst != cpu_to_be16(~0)) {
 317                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
 318                                 return -EOPNOTSUPP;
 319                         }
 320 
 321                         err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
 322                                                             enc_op.key,
 323                                                             &key_layer_two,
 324                                                             &key_layer,
 325                                                             &key_size, priv,
 326                                                             tun_type, extack);
 327                         if (err)
 328                                 return err;
 329 
 330                         /* Ensure the ingress netdev matches the expected
 331                          * tun type.
 332                          */
 333                         if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
 334                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
 335                                 return -EOPNOTSUPP;
 336                         }
 337                 }
 338         }
 339 
 340         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
 341                 flow_rule_match_basic(rule, &basic);
 342 
 343         if (basic.mask && basic.mask->n_proto) {
 344                 /* Ethernet type is present in the key. */
 345                 switch (basic.key->n_proto) {
 346                 case cpu_to_be16(ETH_P_IP):
 347                         key_layer |= NFP_FLOWER_LAYER_IPV4;
 348                         key_size += sizeof(struct nfp_flower_ipv4);
 349                         break;
 350 
 351                 case cpu_to_be16(ETH_P_IPV6):
 352                         key_layer |= NFP_FLOWER_LAYER_IPV6;
 353                         key_size += sizeof(struct nfp_flower_ipv6);
 354                         break;
 355 
 356                 /* Currently we do not offload ARP
 357                  * because we rely on it to get to the host.
 358                  */
 359                 case cpu_to_be16(ETH_P_ARP):
 360                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
 361                         return -EOPNOTSUPP;
 362 
 363                 case cpu_to_be16(ETH_P_MPLS_UC):
 364                 case cpu_to_be16(ETH_P_MPLS_MC):
 365                         if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
 366                                 key_layer |= NFP_FLOWER_LAYER_MAC;
 367                                 key_size += sizeof(struct nfp_flower_mac_mpls);
 368                         }
 369                         break;
 370 
 371                 /* Will be included in layer 2. */
 372                 case cpu_to_be16(ETH_P_8021Q):
 373                         break;
 374 
 375                 default:
 376                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
 377                         return -EOPNOTSUPP;
 378                 }
 379         } else if (nfp_flower_check_higher_than_mac(flow)) {
 380                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
 381                 return -EOPNOTSUPP;
 382         }
 383 
 384         if (basic.mask && basic.mask->ip_proto) {
 385                 switch (basic.key->ip_proto) {
 386                 case IPPROTO_TCP:
 387                 case IPPROTO_UDP:
 388                 case IPPROTO_SCTP:
 389                 case IPPROTO_ICMP:
 390                 case IPPROTO_ICMPV6:
 391                         key_layer |= NFP_FLOWER_LAYER_TP;
 392                         key_size += sizeof(struct nfp_flower_tp_ports);
 393                         break;
 394                 }
 395         }
 396 
 397         if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
 398             nfp_flower_check_higher_than_l3(flow)) {
 399                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
 400                 return -EOPNOTSUPP;
 401         }
 402 
 403         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
 404                 struct flow_match_tcp tcp;
 405                 u32 tcp_flags;
 406 
 407                 flow_rule_match_tcp(rule, &tcp);
 408                 tcp_flags = be16_to_cpu(tcp.key->flags);
 409 
 410                 if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
 411                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
 412                         return -EOPNOTSUPP;
 413                 }
 414 
 415                 /* We only support PSH and URG flags when either
 416                  * FIN, SYN or RST is present as well.
 417                  */
 418                 if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
 419                     !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
 420                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
 421                         return -EOPNOTSUPP;
 422                 }
 423 
  424                 /* We need to store TCP flags in either the IPv4 or IPv6 key
  425                  * space, thus we need to ensure we include an IPv4/IPv6 key
  426                  * layer if we have not done so already.
 427                  */
 428                 if (!basic.key) {
 429                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
 430                         return -EOPNOTSUPP;
 431                 }
 432 
 433                 if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
 434                     !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
 435                         switch (basic.key->n_proto) {
 436                         case cpu_to_be16(ETH_P_IP):
 437                                 key_layer |= NFP_FLOWER_LAYER_IPV4;
 438                                 key_size += sizeof(struct nfp_flower_ipv4);
 439                                 break;
 440 
 441                         case cpu_to_be16(ETH_P_IPV6):
  442                                 key_layer |= NFP_FLOWER_LAYER_IPV6;
 443                                 key_size += sizeof(struct nfp_flower_ipv6);
 444                                 break;
 445 
 446                         default:
 447                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
 448                                 return -EOPNOTSUPP;
 449                         }
 450                 }
 451         }
 452 
 453         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
 454                 struct flow_match_control ctl;
 455 
 456                 flow_rule_match_control(rule, &ctl);
 457                 if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
 458                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
 459                         return -EOPNOTSUPP;
 460                 }
 461         }
 462 
 463         ret_key_ls->key_layer = key_layer;
 464         ret_key_ls->key_layer_two = key_layer_two;
 465         ret_key_ls->key_size = key_size;
 466 
 467         return 0;
 468 }
 469 
 470 static struct nfp_fl_payload *
 471 nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 472 {
 473         struct nfp_fl_payload *flow_pay;
 474 
 475         flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
 476         if (!flow_pay)
 477                 return NULL;
 478 
 479         flow_pay->meta.key_len = key_layer->key_size;
 480         flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
 481         if (!flow_pay->unmasked_data)
 482                 goto err_free_flow;
 483 
 484         flow_pay->meta.mask_len = key_layer->key_size;
 485         flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
 486         if (!flow_pay->mask_data)
 487                 goto err_free_unmasked;
 488 
 489         flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
 490         if (!flow_pay->action_data)
 491                 goto err_free_mask;
 492 
 493         flow_pay->nfp_tun_ipv4_addr = 0;
 494         flow_pay->meta.flags = 0;
 495         INIT_LIST_HEAD(&flow_pay->linked_flows);
 496         flow_pay->in_hw = false;
 497         flow_pay->pre_tun_rule.dev = NULL;
 498 
 499         return flow_pay;
 500 
 501 err_free_mask:
 502         kfree(flow_pay->mask_data);
 503 err_free_unmasked:
 504         kfree(flow_pay->unmasked_data);
 505 err_free_flow:
 506         kfree(flow_pay);
 507         return NULL;
 508 }
 509 
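      /* Walk the action list of @flow and widen @merge to cover every field a
       * set/push action may rewrite, so that fields modified by the first
       * sub_flow count as matchable by the second. The id of the last action
       * and, optionally, the number of output actions are also reported.
       */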
 510 static int
 511 nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
 512                                      struct nfp_flower_merge_check *merge,
 513                                      u8 *last_act_id, int *act_out)
 514 {
 515         struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
 516         struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
 517         struct nfp_fl_set_ip4_addrs *ipv4_add;
 518         struct nfp_fl_set_ipv6_addr *ipv6_add;
 519         struct nfp_fl_push_vlan *push_vlan;
 520         struct nfp_fl_set_tport *tport;
 521         struct nfp_fl_set_eth *eth;
 522         struct nfp_fl_act_head *a;
 523         unsigned int act_off = 0;
 524         u8 act_id = 0;
 525         u8 *ports;
 526         int i;
 527 
 528         while (act_off < flow->meta.act_len) {
 529                 a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
 530                 act_id = a->jump_id;
 531 
 532                 switch (act_id) {
 533                 case NFP_FL_ACTION_OPCODE_OUTPUT:
 534                         if (act_out)
 535                                 (*act_out)++;
 536                         break;
 537                 case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
 538                         push_vlan = (struct nfp_fl_push_vlan *)a;
 539                         if (push_vlan->vlan_tci)
 540                                 merge->tci = cpu_to_be16(0xffff);
 541                         break;
 542                 case NFP_FL_ACTION_OPCODE_POP_VLAN:
 543                         merge->tci = cpu_to_be16(0);
 544                         break;
 545                 case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
 546                         /* New tunnel header means l2 to l4 can be matched. */
 547                         eth_broadcast_addr(&merge->l2.mac_dst[0]);
 548                         eth_broadcast_addr(&merge->l2.mac_src[0]);
 549                         memset(&merge->l4, 0xff,
 550                                sizeof(struct nfp_flower_tp_ports));
 551                         memset(&merge->ipv4, 0xff,
 552                                sizeof(struct nfp_flower_ipv4));
 553                         break;
 554                 case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
 555                         eth = (struct nfp_fl_set_eth *)a;
 556                         for (i = 0; i < ETH_ALEN; i++)
 557                                 merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
 558                         for (i = 0; i < ETH_ALEN; i++)
 559                                 merge->l2.mac_src[i] |=
 560                                         eth->eth_addr_mask[ETH_ALEN + i];
 561                         break;
 562                 case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
 563                         ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
 564                         merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
 565                         merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
 566                         break;
 567                 case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
 568                         ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
 569                         merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
 570                         merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
 571                         break;
 572                 case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
 573                         ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
 574                         for (i = 0; i < 4; i++)
 575                                 merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
 576                                         ipv6_add->ipv6[i].mask;
 577                         break;
 578                 case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
 579                         ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
 580                         for (i = 0; i < 4; i++)
 581                                 merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
 582                                         ipv6_add->ipv6[i].mask;
 583                         break;
 584                 case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
 585                         ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
 586                         merge->ipv6.ip_ext.ttl |=
 587                                 ipv6_tc_hl_fl->ipv6_hop_limit_mask;
 588                         merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
 589                         merge->ipv6.ipv6_flow_label_exthdr |=
 590                                 ipv6_tc_hl_fl->ipv6_label_mask;
 591                         break;
 592                 case NFP_FL_ACTION_OPCODE_SET_UDP:
 593                 case NFP_FL_ACTION_OPCODE_SET_TCP:
 594                         tport = (struct nfp_fl_set_tport *)a;
 595                         ports = (u8 *)&merge->l4.port_src;
 596                         for (i = 0; i < 4; i++)
 597                                 ports[i] |= tport->tp_port_mask[i];
 598                         break;
 599                 case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
 600                 case NFP_FL_ACTION_OPCODE_PRE_LAG:
 601                 case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
 602                         break;
 603                 default:
 604                         return -EOPNOTSUPP;
 605                 }
 606 
 607                 act_off += a->len_lw << NFP_FL_LW_SIZ;
 608         }
 609 
 610         if (last_act_id)
 611                 *last_act_id = act_id;
 612 
 613         return 0;
 614 }
 615 
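      /* Copy the mask data of @flow, layer by layer, into @merge for bitwise
       * comparison. Key layers outside NFP_FLOWER_MERGE_FIELDS are rejected
       * unless @extra_fields is set.
       */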
 616 static int
 617 nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
 618                                 struct nfp_flower_merge_check *merge,
 619                                 bool extra_fields)
 620 {
 621         struct nfp_flower_meta_tci *meta_tci;
 622         u8 *mask = flow->mask_data;
 623         u8 key_layer, match_size;
 624 
 625         memset(merge, 0, sizeof(struct nfp_flower_merge_check));
 626 
 627         meta_tci = (struct nfp_flower_meta_tci *)mask;
 628         key_layer = meta_tci->nfp_flow_key_layer;
 629 
 630         if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
 631                 return -EOPNOTSUPP;
 632 
 633         merge->tci = meta_tci->tci;
 634         mask += sizeof(struct nfp_flower_meta_tci);
 635 
 636         if (key_layer & NFP_FLOWER_LAYER_EXT_META)
 637                 mask += sizeof(struct nfp_flower_ext_meta);
 638 
 639         mask += sizeof(struct nfp_flower_in_port);
 640 
 641         if (key_layer & NFP_FLOWER_LAYER_MAC) {
 642                 match_size = sizeof(struct nfp_flower_mac_mpls);
 643                 memcpy(&merge->l2, mask, match_size);
 644                 mask += match_size;
 645         }
 646 
 647         if (key_layer & NFP_FLOWER_LAYER_TP) {
 648                 match_size = sizeof(struct nfp_flower_tp_ports);
 649                 memcpy(&merge->l4, mask, match_size);
 650                 mask += match_size;
 651         }
 652 
 653         if (key_layer & NFP_FLOWER_LAYER_IPV4) {
 654                 match_size = sizeof(struct nfp_flower_ipv4);
 655                 memcpy(&merge->ipv4, mask, match_size);
 656         }
 657 
 658         if (key_layer & NFP_FLOWER_LAYER_IPV6) {
 659                 match_size = sizeof(struct nfp_flower_ipv6);
 660                 memcpy(&merge->ipv6, mask, match_size);
 661         }
 662 
 663         return 0;
 664 }
 665 
 666 static int
 667 nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
 668                      struct nfp_fl_payload *sub_flow2)
 669 {
 670         /* Two flows can be merged if sub_flow2 only matches on bits that are
 671          * either matched by sub_flow1 or set by a sub_flow1 action. This
 672          * ensures that every packet that hits sub_flow1 and recirculates is
 673          * guaranteed to hit sub_flow2.
 674          */
 675         struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
 676         int err, act_out = 0;
 677         u8 last_act_id = 0;
 678 
 679         err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
 680                                               true);
 681         if (err)
 682                 return err;
 683 
 684         err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
 685                                               false);
 686         if (err)
 687                 return err;
 688 
 689         err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
 690                                                    &last_act_id, &act_out);
 691         if (err)
 692                 return err;
 693 
 694         /* Must only be 1 output action and it must be the last in sequence. */
 695         if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
 696                 return -EOPNOTSUPP;
 697 
 698         /* Reject merge if sub_flow2 matches on something that is not matched
 699          * on or set in an action by sub_flow1.
 700          */
 701         err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
 702                             sub_flow1_merge.vals,
 703                             sizeof(struct nfp_flower_merge_check) * 8);
 704         if (err)
 705                 return -EINVAL;
 706 
 707         return 0;
 708 }
 709 
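      /* Copy any leading pre-actions (pre-tunnel, pre-LAG) from @act_src to
       * @act_dst and return the number of bytes copied. Pre-actions must stay
       * at the head of the merged action list; *tunnel_act is set if a
       * pre-tunnel action was found.
       */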
 710 static unsigned int
 711 nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
 712                             bool *tunnel_act)
 713 {
 714         unsigned int act_off = 0, act_len;
 715         struct nfp_fl_act_head *a;
 716         u8 act_id = 0;
 717 
 718         while (act_off < len) {
 719                 a = (struct nfp_fl_act_head *)&act_src[act_off];
 720                 act_len = a->len_lw << NFP_FL_LW_SIZ;
 721                 act_id = a->jump_id;
 722 
 723                 switch (act_id) {
 724                 case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
 725                         if (tunnel_act)
 726                                 *tunnel_act = true;
 727                         /* fall through */
 728                 case NFP_FL_ACTION_OPCODE_PRE_LAG:
 729                         memcpy(act_dst + act_off, act_src + act_off, act_len);
 730                         break;
 731                 default:
 732                         return act_off;
 733                 }
 734 
 735                 act_off += act_len;
 736         }
 737 
 738         return act_off;
 739 }
 740 
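      /* Validate the sub_flow2 actions that would run after sub_flow1's tunnel
       * push: only an optional leading VLAN push followed by output actions is
       * allowed, and a VLAN push without a subsequent output is rejected.
       */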
 741 static int
 742 nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
 743 {
 744         struct nfp_fl_act_head *a;
 745         unsigned int act_off = 0;
 746 
 747         while (act_off < len) {
 748                 a = (struct nfp_fl_act_head *)&acts[act_off];
 749 
 750                 if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
 751                         *vlan = (struct nfp_fl_push_vlan *)a;
 752                 else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
 753                         return -EOPNOTSUPP;
 754 
 755                 act_off += a->len_lw << NFP_FL_LW_SIZ;
 756         }
 757 
 758         /* Ensure any VLAN push also has an egress action. */
 759         if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
 760                 return -EOPNOTSUPP;
 761 
 762         return 0;
 763 }
 764 
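      /* Fold a post-tunnel VLAN push into the tunnel set action by copying its
       * TPID and TCI into the tunnel's outer VLAN fields, so the firmware adds
       * the VLAN tag to the outer header. Fails if no tunnel set action is
       * found in the action list.
       */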
 765 static int
 766 nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
 767 {
 768         struct nfp_fl_set_ipv4_tun *tun;
 769         struct nfp_fl_act_head *a;
 770         unsigned int act_off = 0;
 771 
 772         while (act_off < len) {
 773                 a = (struct nfp_fl_act_head *)&acts[act_off];
 774 
 775                 if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
 776                         tun = (struct nfp_fl_set_ipv4_tun *)a;
 777                         tun->outer_vlan_tpid = vlan->vlan_tpid;
 778                         tun->outer_vlan_tci = vlan->vlan_tci;
 779 
 780                         return 0;
 781                 }
 782 
 783                 act_off += a->len_lw << NFP_FL_LW_SIZ;
 784         }
 785 
 786         /* Return error if no tunnel action is found. */
 787         return -EOPNOTSUPP;
 788 }
 789 
 790 static int
 791 nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
 792                         struct nfp_fl_payload *sub_flow2,
 793                         struct nfp_fl_payload *merge_flow)
 794 {
 795         unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
 796         struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
 797         bool tunnel_act = false;
 798         char *merge_act;
 799         int err;
 800 
 801         /* The last action of sub_flow1 must be output - do not merge this. */
 802         sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
 803         sub2_act_len = sub_flow2->meta.act_len;
 804 
 805         if (!sub2_act_len)
 806                 return -EINVAL;
 807 
 808         if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
 809                 return -EINVAL;
 810 
 811         /* A shortcut can only be applied if there is a single action. */
 812         if (sub1_act_len)
 813                 merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
 814         else
 815                 merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
 816 
 817         merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
 818         merge_act = merge_flow->action_data;
 819 
 820         /* Copy any pre-actions to the start of merge flow action list. */
 821         pre_off1 = nfp_flower_copy_pre_actions(merge_act,
 822                                                sub_flow1->action_data,
 823                                                sub1_act_len, &tunnel_act);
 824         merge_act += pre_off1;
 825         sub1_act_len -= pre_off1;
 826         pre_off2 = nfp_flower_copy_pre_actions(merge_act,
 827                                                sub_flow2->action_data,
 828                                                sub2_act_len, NULL);
 829         merge_act += pre_off2;
 830         sub2_act_len -= pre_off2;
 831 
 832         /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
 833          * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
 834          * valid merge.
 835          */
 836         if (tunnel_act) {
 837                 char *post_tun_acts = &sub_flow2->action_data[pre_off2];
 838 
 839                 err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
 840                                                   &post_tun_push_vlan);
 841                 if (err)
 842                         return err;
 843 
 844                 if (post_tun_push_vlan) {
 845                         pre_off2 += sizeof(*post_tun_push_vlan);
 846                         sub2_act_len -= sizeof(*post_tun_push_vlan);
 847                 }
 848         }
 849 
 850         /* Copy remaining actions from sub_flows 1 and 2. */
 851         memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
 852 
 853         if (post_tun_push_vlan) {
 854                 /* Update tunnel action in merge to include VLAN push. */
 855                 err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
 856                                                  post_tun_push_vlan);
 857                 if (err)
 858                         return err;
 859 
 860                 merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
 861         }
 862 
 863         merge_act += sub1_act_len;
 864         memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
 865 
 866         return 0;
 867 }
 868 
 869 /* Flow link code should only be accessed under RTNL. */
 870 static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
 871 {
 872         list_del(&link->merge_flow.list);
 873         list_del(&link->sub_flow.list);
 874         kfree(link);
 875 }
 876 
 877 static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
 878                                     struct nfp_fl_payload *sub_flow)
 879 {
 880         struct nfp_fl_payload_link *link;
 881 
 882         list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
 883                 if (link->sub_flow.flow == sub_flow) {
 884                         nfp_flower_unlink_flow(link);
 885                         return;
 886                 }
 887 }
 888 
 889 static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
 890                                  struct nfp_fl_payload *sub_flow)
 891 {
 892         struct nfp_fl_payload_link *link;
 893 
 894         link = kmalloc(sizeof(*link), GFP_KERNEL);
 895         if (!link)
 896                 return -ENOMEM;
 897 
 898         link->merge_flow.flow = merge_flow;
 899         list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
 900         link->sub_flow.flow = sub_flow;
 901         list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
 902 
 903         return 0;
 904 }
 905 
 906 /**
  907  * nfp_flower_merge_offloaded_flows() - Merge two existing flows into a single flow.
 908  * @app:        Pointer to the APP handle
 909  * @sub_flow1:  Initial flow matched to produce merge hint
 910  * @sub_flow2:  Post recirculation flow matched in merge hint
 911  *
  912  * Combines two flows (if valid) into a single flow, removing the initial
  913  * flow from hw and offloading the new, merged flow.
  914  *
  915  * Return: negative value on error, 0 on success.
 916  */
 917 int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 918                                      struct nfp_fl_payload *sub_flow1,
 919                                      struct nfp_fl_payload *sub_flow2)
 920 {
 921         struct flow_cls_offload merge_tc_off;
 922         struct nfp_flower_priv *priv = app->priv;
 923         struct netlink_ext_ack *extack = NULL;
 924         struct nfp_fl_payload *merge_flow;
 925         struct nfp_fl_key_ls merge_key_ls;
 926         int err;
 927 
 928         ASSERT_RTNL();
 929 
 930         extack = merge_tc_off.common.extack;
 931         if (sub_flow1 == sub_flow2 ||
 932             nfp_flower_is_merge_flow(sub_flow1) ||
 933             nfp_flower_is_merge_flow(sub_flow2))
 934                 return -EINVAL;
 935 
 936         err = nfp_flower_can_merge(sub_flow1, sub_flow2);
 937         if (err)
 938                 return err;
 939 
 940         merge_key_ls.key_size = sub_flow1->meta.key_len;
 941 
 942         merge_flow = nfp_flower_allocate_new(&merge_key_ls);
 943         if (!merge_flow)
 944                 return -ENOMEM;
 945 
 946         merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
 947         merge_flow->ingress_dev = sub_flow1->ingress_dev;
 948 
 949         memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
 950                sub_flow1->meta.key_len);
 951         memcpy(merge_flow->mask_data, sub_flow1->mask_data,
 952                sub_flow1->meta.mask_len);
 953 
 954         err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
 955         if (err)
 956                 goto err_destroy_merge_flow;
 957 
 958         err = nfp_flower_link_flows(merge_flow, sub_flow1);
 959         if (err)
 960                 goto err_destroy_merge_flow;
 961 
 962         err = nfp_flower_link_flows(merge_flow, sub_flow2);
 963         if (err)
 964                 goto err_unlink_sub_flow1;
 965 
 966         merge_tc_off.cookie = merge_flow->tc_flower_cookie;
 967         err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
 968                                         merge_flow->ingress_dev, extack);
 969         if (err)
 970                 goto err_unlink_sub_flow2;
 971 
 972         err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
 973                                      nfp_flower_table_params);
 974         if (err)
 975                 goto err_release_metadata;
 976 
 977         err = nfp_flower_xmit_flow(app, merge_flow,
 978                                    NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
 979         if (err)
 980                 goto err_remove_rhash;
 981 
 982         merge_flow->in_hw = true;
 983         sub_flow1->in_hw = false;
 984 
 985         return 0;
 986 
 987 err_remove_rhash:
 988         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
 989                                             &merge_flow->fl_node,
 990                                             nfp_flower_table_params));
 991 err_release_metadata:
 992         nfp_modify_flow_metadata(app, merge_flow);
 993 err_unlink_sub_flow2:
 994         nfp_flower_unlink_flows(merge_flow, sub_flow2);
 995 err_unlink_sub_flow1:
 996         nfp_flower_unlink_flows(merge_flow, sub_flow1);
 997 err_destroy_merge_flow:
 998         kfree(merge_flow->action_data);
 999         kfree(merge_flow->mask_data);
1000         kfree(merge_flow->unmasked_data);
1001         kfree(merge_flow);
1002         return err;
1003 }
1004 
1005 /**
 1006  * nfp_flower_validate_pre_tun_rule() - Verify a flow as a pre-tunnel rule.
1007  * @app:        Pointer to the APP handle
1008  * @flow:       Pointer to NFP flow representation of rule
1009  * @extack:     Netlink extended ACK report
1010  *
1011  * Verifies the flow as a pre-tunnel rule.
1012  *
1013  * Return: negative value on error, 0 if verified.
1014  */
1015 static int
1016 nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
1017                                  struct nfp_fl_payload *flow,
1018                                  struct netlink_ext_ack *extack)
1019 {
1020         struct nfp_flower_meta_tci *meta_tci;
1021         struct nfp_flower_mac_mpls *mac;
1022         struct nfp_fl_act_head *act;
1023         u8 *mask = flow->mask_data;
1024         bool vlan = false;
1025         int act_offset;
1026         u8 key_layer;
1027 
1028         meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
1029         if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
1030                 u16 vlan_tci = be16_to_cpu(meta_tci->tci);
1031 
1032                 vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
1033                 flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
1034                 vlan = true;
1035         } else {
1036                 flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
1037         }
1038 
1039         key_layer = meta_tci->nfp_flow_key_layer;
1040         if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
1041                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
1042                 return -EOPNOTSUPP;
1043         }
1044 
1045         if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
1046                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
1047                 return -EOPNOTSUPP;
1048         }
1049 
1050         /* Skip fields known to exist. */
1051         mask += sizeof(struct nfp_flower_meta_tci);
1052         mask += sizeof(struct nfp_flower_in_port);
1053 
1054         /* Ensure destination MAC address is fully matched. */
1055         mac = (struct nfp_flower_mac_mpls *)mask;
1056         if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
1057                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
1058                 return -EOPNOTSUPP;
1059         }
1060 
1061         if (key_layer & NFP_FLOWER_LAYER_IPV4) {
1062                 int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
1063                 int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
1064                 int i;
1065 
1066                 mask += sizeof(struct nfp_flower_mac_mpls);
1067 
1068                 /* Ensure proto and flags are the only IP layer fields. */
1069                 for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
1070                         if (mask[i] && i != ip_flags && i != ip_proto) {
1071                                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
1072                                 return -EOPNOTSUPP;
1073                         }
1074         }
1075 
1076         /* Action must be a single egress or pop_vlan and egress. */
1077         act_offset = 0;
1078         act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
1079         if (vlan) {
1080                 if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
1081                         NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
1082                         return -EOPNOTSUPP;
1083                 }
1084 
1085                 act_offset += act->len_lw << NFP_FL_LW_SIZ;
1086                 act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
1087         }
1088 
1089         if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
1090                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
1091                 return -EOPNOTSUPP;
1092         }
1093 
1094         act_offset += act->len_lw << NFP_FL_LW_SIZ;
1095 
1096         /* Ensure there are no more actions after egress. */
1097         if (act_offset != flow->meta.act_len) {
1098                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
1099                 return -EOPNOTSUPP;
1100         }
1101 
1102         return 0;
1103 }
1104 
1105 /**
1106  * nfp_flower_add_offload() - Adds a new flow to hardware.
1107  * @app:        Pointer to the APP handle
1108  * @netdev:     netdev structure.
1109  * @flow:       TC flower classifier offload structure.
1110  *
1111  * Adds a new flow to the repeated hash structure and action payload.
1112  *
1113  * Return: negative value on error, 0 if configured successfully.
1114  */
1115 static int
1116 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
1117                        struct flow_cls_offload *flow)
1118 {
1119         enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
1120         struct nfp_flower_priv *priv = app->priv;
1121         struct netlink_ext_ack *extack = NULL;
1122         struct nfp_fl_payload *flow_pay;
1123         struct nfp_fl_key_ls *key_layer;
1124         struct nfp_port *port = NULL;
1125         int err;
1126 
1127         extack = flow->common.extack;
1128         if (nfp_netdev_is_nfp_repr(netdev))
1129                 port = nfp_port_from_netdev(netdev);
1130 
1131         key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
1132         if (!key_layer)
1133                 return -ENOMEM;
1134 
1135         err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
1136                                               &tun_type, extack);
1137         if (err)
1138                 goto err_free_key_ls;
1139 
1140         flow_pay = nfp_flower_allocate_new(key_layer);
1141         if (!flow_pay) {
1142                 err = -ENOMEM;
1143                 goto err_free_key_ls;
1144         }
1145 
1146         err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
1147                                             flow_pay, tun_type, extack);
1148         if (err)
1149                 goto err_destroy_flow;
1150 
1151         err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
1152         if (err)
1153                 goto err_destroy_flow;
1154 
1155         if (flow_pay->pre_tun_rule.dev) {
1156                 err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
1157                 if (err)
1158                         goto err_destroy_flow;
1159         }
1160 
1161         err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
1162         if (err)
1163                 goto err_destroy_flow;
1164 
1165         flow_pay->tc_flower_cookie = flow->cookie;
1166         err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
1167                                      nfp_flower_table_params);
1168         if (err) {
1169                 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
1170                 goto err_release_metadata;
1171         }
1172 
1173         if (flow_pay->pre_tun_rule.dev)
1174                 err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
1175         else
1176                 err = nfp_flower_xmit_flow(app, flow_pay,
1177                                            NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
1178         if (err)
1179                 goto err_remove_rhash;
1180 
1181         if (port)
1182                 port->tc_offload_cnt++;
1183 
1184         flow_pay->in_hw = true;
1185 
1186         /* Deallocate flow payload when flower rule has been destroyed. */
1187         kfree(key_layer);
1188 
1189         return 0;
1190 
1191 err_remove_rhash:
1192         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1193                                             &flow_pay->fl_node,
1194                                             nfp_flower_table_params));
1195 err_release_metadata:
1196         nfp_modify_flow_metadata(app, flow_pay);
1197 err_destroy_flow:
1198         kfree(flow_pay->action_data);
1199         kfree(flow_pay->mask_data);
1200         kfree(flow_pay->unmasked_data);
1201         kfree(flow_pay);
1202 err_free_key_ls:
1203         kfree(key_layer);
1204         return err;
1205 }
1206 
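      /* Tear down a merge flow when one of its sub_flows is deleted. If the
       * deleted sub_flow is not the origin flow that the merge overwrote in
       * hardware, the origin is restored with a FLOW_MOD; otherwise the merge
       * flow is simply deleted from hardware. Links to both sub_flows are
       * always cleaned up.
       */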
1207 static void
1208 nfp_flower_remove_merge_flow(struct nfp_app *app,
1209                              struct nfp_fl_payload *del_sub_flow,
1210                              struct nfp_fl_payload *merge_flow)
1211 {
1212         struct nfp_flower_priv *priv = app->priv;
1213         struct nfp_fl_payload_link *link, *temp;
1214         struct nfp_fl_payload *origin;
1215         bool mod = false;
1216         int err;
1217 
1218         link = list_first_entry(&merge_flow->linked_flows,
1219                                 struct nfp_fl_payload_link, merge_flow.list);
1220         origin = link->sub_flow.flow;
1221 
 1222         /* Re-add the rule the merge had overwritten if it has not been deleted. */
1223         if (origin != del_sub_flow)
1224                 mod = true;
1225 
1226         err = nfp_modify_flow_metadata(app, merge_flow);
1227         if (err) {
1228                 nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
1229                 goto err_free_links;
1230         }
1231 
1232         if (!mod) {
1233                 err = nfp_flower_xmit_flow(app, merge_flow,
1234                                            NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1235                 if (err) {
1236                         nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
1237                         goto err_free_links;
1238                 }
1239         } else {
1240                 __nfp_modify_flow_metadata(priv, origin);
1241                 err = nfp_flower_xmit_flow(app, origin,
1242                                            NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
1243                 if (err)
1244                         nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
1245                 origin->in_hw = true;
1246         }
1247 
1248 err_free_links:
1249         /* Clean any links connected with the merged flow. */
1250         list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
1251                                  merge_flow.list)
1252                 nfp_flower_unlink_flow(link);
1253 
1254         kfree(merge_flow->action_data);
1255         kfree(merge_flow->mask_data);
1256         kfree(merge_flow->unmasked_data);
1257         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1258                                             &merge_flow->fl_node,
1259                                             nfp_flower_table_params));
1260         kfree_rcu(merge_flow, rcu);
1261 }
1262 
1263 static void
1264 nfp_flower_del_linked_merge_flows(struct nfp_app *app,
1265                                   struct nfp_fl_payload *sub_flow)
1266 {
1267         struct nfp_fl_payload_link *link, *temp;
1268 
1269         /* Remove any merge flow formed from the deleted sub_flow. */
1270         list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
1271                                  sub_flow.list)
1272                 nfp_flower_remove_merge_flow(app, sub_flow,
1273                                              link->merge_flow.flow);
1274 }
1275 
1276 /**
1277  * nfp_flower_del_offload() - Removes a flow from hardware.
1278  * @app:        Pointer to the APP handle
1279  * @netdev:     netdev structure.
1280  * @flow:       TC flower classifier offload structure
1281  *
1282  * Removes the flow from the driver's flow hash table and frees its
1283  * payload data. Any flows merged from this flow are also deleted.
1284  *
1285  * Return: negative value on error, 0 if removed successfully.
1286  */
1287 static int
1288 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
1289                        struct flow_cls_offload *flow)
1290 {
1291         struct nfp_flower_priv *priv = app->priv;
1292         struct netlink_ext_ack *extack = NULL;
1293         struct nfp_fl_payload *nfp_flow;
1294         struct nfp_port *port = NULL;
1295         int err;
1296 
1297         extack = flow->common.extack;
1298         if (nfp_netdev_is_nfp_repr(netdev))
1299                 port = nfp_port_from_netdev(netdev);
1300 
1301         nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1302         if (!nfp_flow) {
1303                 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
1304                 return -ENOENT;
1305         }
1306 
1307         err = nfp_modify_flow_metadata(app, nfp_flow);
1308         if (err)
1309                 goto err_free_merge_flow;
1310 
1311         if (nfp_flow->nfp_tun_ipv4_addr)
1312                 nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
1313 
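             /* A flow that was never written to hardware only needs its host
              * state cleaned up; no delete message is sent to firmware.
              */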
1314         if (!nfp_flow->in_hw) {
1315                 err = 0;
1316                 goto err_free_merge_flow;
1317         }
1318 
1319         if (nfp_flow->pre_tun_rule.dev)
1320                 err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
1321         else
1322                 err = nfp_flower_xmit_flow(app, nfp_flow,
1323                                            NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1324         /* Fall through on error. */
1325 
1326 err_free_merge_flow:
1327         nfp_flower_del_linked_merge_flows(app, nfp_flow);
1328         if (port)
1329                 port->tc_offload_cnt--;
1330         kfree(nfp_flow->action_data);
1331         kfree(nfp_flow->mask_data);
1332         kfree(nfp_flow->unmasked_data);
1333         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1334                                             &nfp_flow->fl_node,
1335                                             nfp_flower_table_params));
1336         kfree_rcu(nfp_flow, rcu);
1337         return err;
1338 }
1339 
1340 static void
1341 __nfp_flower_update_merge_stats(struct nfp_app *app,
1342                                 struct nfp_fl_payload *merge_flow)
1343 {
1344         struct nfp_flower_priv *priv = app->priv;
1345         struct nfp_fl_payload_link *link;
1346         struct nfp_fl_payload *sub_flow;
1347         u64 pkts, bytes, used;
1348         u32 ctx_id;
1349 
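             /* host_ctx_id doubles as the index into the per-flow stats array
              * maintained by the driver.
              */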
1350         ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
1351         pkts = priv->stats[ctx_id].pkts;
1352         /* Do not cycle subflows if no stats to distribute. */
1353         if (!pkts)
1354                 return;
1355         bytes = priv->stats[ctx_id].bytes;
1356         used = priv->stats[ctx_id].used;
1357 
1358         /* Reset stats for the merge flow. */
1359         priv->stats[ctx_id].pkts = 0;
1360         priv->stats[ctx_id].bytes = 0;
1361 
1362         /* The merge flow has received stats updates from firmware.
1363          * Distribute these stats to all subflows that form the merge.
1364          * The stats will then be collected by TC via the subflows.
1365          */
1366         list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
1367                 sub_flow = link->sub_flow.flow;
1368                 ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
1369                 priv->stats[ctx_id].pkts += pkts;
1370                 priv->stats[ctx_id].bytes += bytes;
1371                 priv->stats[ctx_id].used = max_t(u64, used,
1372                                                  priv->stats[ctx_id].used);
1373         }
1374 }
1375 
1376 static void
1377 nfp_flower_update_merge_stats(struct nfp_app *app,
1378                               struct nfp_fl_payload *sub_flow)
1379 {
1380         struct nfp_fl_payload_link *link;
1381 
1382         /* Get merge flows that the subflow forms to distribute their stats. */
1383         list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
1384                 __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
1385 }
1386 
1387 /**
1388  * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
1389  * @app:        Pointer to the APP handle
1390  * @netdev:     Netdev structure.
1391  * @flow:       TC flower classifier offload structure
1392  *
1393  * Populates a flow statistics structure which corresponds to a
1394  * specific flow.
1395  *
1396  * Return: negative value on error, 0 if stats populated successfully.
1397  */
1398 static int
1399 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
1400                      struct flow_cls_offload *flow)
1401 {
1402         struct nfp_flower_priv *priv = app->priv;
1403         struct netlink_ext_ack *extack = NULL;
1404         struct nfp_fl_payload *nfp_flow;
1405         u32 ctx_id;
1406 
1407         extack = flow->common.extack;
1408         nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1409         if (!nfp_flow) {
1410                 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
1411                 return -EINVAL;
1412         }
1413 
1414         ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
1415 
1416         spin_lock_bh(&priv->stats_lock);
1417         /* If request is for a sub_flow, update stats from merged flows. */
1418         if (!list_empty(&nfp_flow->linked_flows))
1419                 nfp_flower_update_merge_stats(app, nfp_flow);
1420 
1421         flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
1422                           priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
1423 
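             /* The stats above were reported to TC as a delta, so clear the
              * local counters to avoid counting them again on the next dump.
              */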
1424         priv->stats[ctx_id].pkts = 0;
1425         priv->stats[ctx_id].bytes = 0;
1426         spin_unlock_bh(&priv->stats_lock);
1427 
1428         return 0;
1429 }
1430 
1431 static int
1432 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
1433                         struct flow_cls_offload *flower)
1434 {
1435         if (!eth_proto_is_802_3(flower->common.protocol))
1436                 return -EOPNOTSUPP;
1437 
1438         switch (flower->command) {
1439         case FLOW_CLS_REPLACE:
1440                 return nfp_flower_add_offload(app, netdev, flower);
1441         case FLOW_CLS_DESTROY:
1442                 return nfp_flower_del_offload(app, netdev, flower);
1443         case FLOW_CLS_STATS:
1444                 return nfp_flower_get_stats(app, netdev, flower);
1445         default:
1446                 return -EOPNOTSUPP;
1447         }
1448 }
1449 
1450 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
1451                                         void *type_data, void *cb_priv)
1452 {
1453         struct nfp_repr *repr = cb_priv;
1454 
1455         if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
1456                 return -EOPNOTSUPP;
1457 
1458         switch (type) {
1459         case TC_SETUP_CLSFLOWER:
1460                 return nfp_flower_repr_offload(repr->app, repr->netdev,
1461                                                type_data);
1462         case TC_SETUP_CLSMATCHALL:
1463                 return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
1464                                                     type_data);
1465         default:
1466                 return -EOPNOTSUPP;
1467         }
1468 }
1469 
1470 static LIST_HEAD(nfp_block_cb_list);
1471 
1472 static int nfp_flower_setup_tc_block(struct net_device *netdev,
1473                                      struct flow_block_offload *f)
1474 {
1475         struct nfp_repr *repr = netdev_priv(netdev);
1476         struct nfp_flower_repr_priv *repr_priv;
1477         struct flow_block_cb *block_cb;
1478 
1479         if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1480                 return -EOPNOTSUPP;
1481 
1482         repr_priv = repr->app_priv;
1483         repr_priv->block_shared = f->block_shared;
1484         f->driver_block_list = &nfp_block_cb_list;
1485 
1486         switch (f->command) {
1487         case FLOW_BLOCK_BIND:
1488                 if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
1489                                           &nfp_block_cb_list))
1490                         return -EBUSY;
1491 
1492                 block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
1493                                                repr, repr, NULL);
1494                 if (IS_ERR(block_cb))
1495                         return PTR_ERR(block_cb);
1496 
1497                 flow_block_cb_add(block_cb, f);
1498                 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1499                 return 0;
1500         case FLOW_BLOCK_UNBIND:
1501                 block_cb = flow_block_cb_lookup(f->block,
1502                                                 nfp_flower_setup_tc_block_cb,
1503                                                 repr);
1504                 if (!block_cb)
1505                         return -ENOENT;
1506 
1507                 flow_block_cb_remove(block_cb, f);
1508                 list_del(&block_cb->driver_list);
1509                 return 0;
1510         default:
1511                 return -EOPNOTSUPP;
1512         }
1513 }
1514 
1515 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
1516                         enum tc_setup_type type, void *type_data)
1517 {
1518         switch (type) {
1519         case TC_SETUP_BLOCK:
1520                 return nfp_flower_setup_tc_block(netdev, type_data);
1521         default:
1522                 return -EOPNOTSUPP;
1523         }
1524 }
1525 
1526 struct nfp_flower_indr_block_cb_priv {
1527         struct net_device *netdev;
1528         struct nfp_app *app;
1529         struct list_head list;
1530 };
1531 
1532 static struct nfp_flower_indr_block_cb_priv *
1533 nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
1534                                      struct net_device *netdev)
1535 {
1536         struct nfp_flower_indr_block_cb_priv *cb_priv;
1537         struct nfp_flower_priv *priv = app->priv;
1538 
1539         /* All callback list access should be protected by RTNL. */
1540         ASSERT_RTNL();
1541 
1542         list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
1543                 if (cb_priv->netdev == netdev)
1544                         return cb_priv;
1545 
1546         return NULL;
1547 }
1548 
1549 static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
1550                                           void *type_data, void *cb_priv)
1551 {
1552         struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1553         struct flow_cls_offload *flower = type_data;
1554 
1555         if (flower->common.chain_index)
1556                 return -EOPNOTSUPP;
1557 
1558         switch (type) {
1559         case TC_SETUP_CLSFLOWER:
1560                 return nfp_flower_repr_offload(priv->app, priv->netdev,
1561                                                type_data);
1562         default:
1563                 return -EOPNOTSUPP;
1564         }
1565 }
1566 
1567 static void nfp_flower_setup_indr_tc_release(void *cb_priv)
1568 {
1569         struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1570 
1571         list_del(&priv->list);
1572         kfree(priv);
1573 }
1574 
1575 static int
1576 nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
1577                                struct flow_block_offload *f)
1578 {
1579         struct nfp_flower_indr_block_cb_priv *cb_priv;
1580         struct nfp_flower_priv *priv = app->priv;
1581         struct flow_block_cb *block_cb;
1582 
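             /* Regular netdevs are offloaded from their ingress block, while
              * internal ports are offloaded from their egress block; any other
              * combination is rejected.
              */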
1583         if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1584              !nfp_flower_internal_port_can_offload(app, netdev)) ||
1585             (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
1586              nfp_flower_internal_port_can_offload(app, netdev)))
1587                 return -EOPNOTSUPP;
1588 
1589         switch (f->command) {
1590         case FLOW_BLOCK_BIND:
1591                 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1592                 if (cb_priv &&
1593                     flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
1594                                           cb_priv,
1595                                           &nfp_block_cb_list))
1596                         return -EBUSY;
1597 
1598                 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1599                 if (!cb_priv)
1600                         return -ENOMEM;
1601 
1602                 cb_priv->netdev = netdev;
1603                 cb_priv->app = app;
1604                 list_add(&cb_priv->list, &priv->indr_block_cb_priv);
1605 
1606                 block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
1607                                                cb_priv, cb_priv,
1608                                                nfp_flower_setup_indr_tc_release);
1609                 if (IS_ERR(block_cb)) {
1610                         list_del(&cb_priv->list);
1611                         kfree(cb_priv);
1612                         return PTR_ERR(block_cb);
1613                 }
1614 
1615                 flow_block_cb_add(block_cb, f);
1616                 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1617                 return 0;
1618         case FLOW_BLOCK_UNBIND:
1619                 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1620                 if (!cb_priv)
1621                         return -ENOENT;
1622 
1623                 block_cb = flow_block_cb_lookup(f->block,
1624                                                 nfp_flower_setup_indr_block_cb,
1625                                                 cb_priv);
1626                 if (!block_cb)
1627                         return -ENOENT;
1628 
1629                 flow_block_cb_remove(block_cb, f);
1630                 list_del(&block_cb->driver_list);
1631                 return 0;
1632         default:
1633                 return -EOPNOTSUPP;
1634         }
1635         return 0;
1636 }
1637 
1638 static int
1639 nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
1640                             enum tc_setup_type type, void *type_data)
1641 {
1642         switch (type) {
1643         case TC_SETUP_BLOCK:
1644                 return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
1645                                                       type_data);
1646         default:
1647                 return -EOPNOTSUPP;
1648         }
1649 }
1650 
1651 int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
1652                                        struct net_device *netdev,
1653                                        unsigned long event)
1654 {
1655         int err;
1656 
1657         if (!nfp_fl_is_netdev_to_offload(netdev))
1658                 return NOTIFY_OK;
1659 
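             /* Indirect blocks allow rules on netdevs not owned by the NFP
              * (such as tunnel devices) to be offloaded; register or
              * unregister the callback as these devices come and go.
              */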
1660         if (event == NETDEV_REGISTER) {
1661                 err = __flow_indr_block_cb_register(netdev, app,
1662                                                     nfp_flower_indr_setup_tc_cb,
1663                                                     app);
1664                 if (err)
1665                         nfp_flower_cmsg_warn(app,
1666                                              "Indirect block reg failed - %s\n",
1667                                              netdev->name);
1668         } else if (event == NETDEV_UNREGISTER) {
1669                 __flow_indr_block_cb_unregister(netdev,
1670                                                 nfp_flower_indr_setup_tc_cb,
1671                                                 app);
1672         }
1673 
1674         return NOTIFY_OK;
1675 }
