root/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes following definitions.
  1. mlx5dr_icm_next_higher_chunk
  2. mlx5dr_htbl_put
  3. mlx5dr_htbl_get
  4. mlx5dr_ste_put
  5. mlx5dr_ste_get
  6. mlx5dr_matcher_supp_flex_parser_icmp_v4
  7. mlx5dr_matcher_supp_flex_parser_icmp_v6
  8. mlx5dr_icm_pool_chunk_size_to_entries
  9. mlx5dr_icm_pool_chunk_size_to_byte
  10. mlx5dr_get_vport_cap

   1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
   2 /* Copyright (c) 2019, Mellanox Technologies */
   3 
   4 #ifndef _DR_TYPES_
   5 #define _DR_TYPES_
   6 
   7 #include <linux/mlx5/driver.h>
   8 #include <linux/refcount.h>
   9 #include "fs_core.h"
  10 #include "wq.h"
  11 #include "lib/mlx5.h"
  12 #include "mlx5_ifc_dr.h"
  13 #include "mlx5dr.h"
  14 
/* Max STEs that can form a single rule's STE chain (builder array bound) */
#define DR_RULE_MAX_STES 17
/* Max STEs one action list may add to a rule - NOTE(review): verify vs dr_action.c */
#define DR_ACTION_MAX_STES 5
/* source_port value that denotes the wire (uplink) port */
#define WIRE_PORT 0xFFFF
/* VLAN tag type encodings - NOTE(review): presumably written into the STE */
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2

/* Domain-scoped logging helpers; forward to mlx5_core on the domain's mdev */
#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
  24 
/* ICM chunk sizes in entries; each successive value doubles the entry
 * count (1, 2, 4, ... 2048K), so the enum value is effectively a log2 step.
 */
enum mlx5dr_icm_chunk_size {
	DR_CHUNK_SIZE_1,
	DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
	DR_CHUNK_SIZE_2,
	DR_CHUNK_SIZE_4,
	DR_CHUNK_SIZE_8,
	DR_CHUNK_SIZE_16,
	DR_CHUNK_SIZE_32,
	DR_CHUNK_SIZE_64,
	DR_CHUNK_SIZE_128,
	DR_CHUNK_SIZE_256,
	DR_CHUNK_SIZE_512,
	DR_CHUNK_SIZE_1K,
	DR_CHUNK_SIZE_2K,
	DR_CHUNK_SIZE_4K,
	DR_CHUNK_SIZE_8K,
	DR_CHUNK_SIZE_16K,
	DR_CHUNK_SIZE_32K,
	DR_CHUNK_SIZE_64K,
	DR_CHUNK_SIZE_128K,
	DR_CHUNK_SIZE_256K,
	DR_CHUNK_SIZE_512K,
	DR_CHUNK_SIZE_1024K,
	DR_CHUNK_SIZE_2048K,
	DR_CHUNK_SIZE_MAX,
};
  51 
/* What an ICM pool holds: STE hash tables or modify-header action data
 * (matches the domain's ste_icm_pool / action_icm_pool split).
 */
enum mlx5dr_icm_type {
	DR_ICM_TYPE_STE,
	DR_ICM_TYPE_MODIFY_ACTION,
};
  56 
  57 static inline enum mlx5dr_icm_chunk_size
  58 mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
  59 {
  60         chunk += 2;
  61         if (chunk < DR_CHUNK_SIZE_MAX)
  62                 return chunk;
  63 
  64         return DR_CHUNK_SIZE_MAX;
  65 }
  66 
/* STE byte layout: 64B total = 32B control + 16B tag + 16B mask */
enum {
	DR_STE_SIZE = 64,
	DR_STE_SIZE_CTRL = 32,
	DR_STE_SIZE_TAG = 16,
	DR_STE_SIZE_MASK = 16,
};

/* STE size without the trailing mask portion (48B) */
enum {
	DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};

/* Size in bytes of a single modify-header action entry */
enum {
	DR_MODIFY_ACTION_SIZE = 8,
};
  81 
/* Bit flags describing which match-parameter sections a matcher uses;
 * combined into the matcher's match_criteria bitmask.
 */
enum mlx5dr_matcher_criteria {
	DR_MATCHER_CRITERIA_EMPTY = 0,
	DR_MATCHER_CRITERIA_OUTER = 1 << 0,
	DR_MATCHER_CRITERIA_MISC = 1 << 1,
	DR_MATCHER_CRITERIA_INNER = 1 << 2,
	DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
	DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
	DR_MATCHER_CRITERIA_MAX = 1 << 5,
};
  91 
/* Kinds of SW steering actions; a rule is built from an ordered array of
 * these (see mlx5dr_actions_build_ste_arr()).
 */
enum mlx5dr_action_type {
	DR_ACTION_TYP_TNL_L2_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L2,
	DR_ACTION_TYP_TNL_L3_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L3,
	DR_ACTION_TYP_DROP,
	DR_ACTION_TYP_QP,
	DR_ACTION_TYP_FT,
	DR_ACTION_TYP_CTR,
	DR_ACTION_TYP_TAG,
	DR_ACTION_TYP_MODIFY_HDR,
	DR_ACTION_TYP_VPORT,
	DR_ACTION_TYP_POP_VLAN,
	DR_ACTION_TYP_PUSH_VLAN,
	DR_ACTION_TYP_MAX,
};
 108 
 109 struct mlx5dr_icm_pool;
 110 struct mlx5dr_icm_chunk;
 111 struct mlx5dr_icm_bucket;
 112 struct mlx5dr_ste_htbl;
 113 struct mlx5dr_match_param;
 114 struct mlx5dr_cmd_caps;
 115 struct mlx5dr_matcher_rx_tx;
 116 
/* SW descriptor for a single Steering Table Entry */
struct mlx5dr_ste {
	/* HW-format STE data - NOTE(review): presumably points into the
	 * owning htbl's hw_ste_arr; confirm in dr_ste.c
	 */
	u8 *hw_ste;
	/* refcount: indicates the number of rules that use this ste */
	u32 refcount;

	/* attached to the miss_list head at each htbl entry */
	struct list_head miss_list_node;

	/* each rule member that uses this ste attached here */
	struct list_head rule_list;

	/* this ste is member of htbl */
	struct mlx5dr_ste_htbl *htbl;

	/* next-level hash table this STE chains to, if any */
	struct mlx5dr_ste_htbl *next_htbl;

	/* this ste is part of a rule, located in ste's chain */
	u8 ste_chain_location;
};
 136 
/* Occupancy bookkeeping used to decide when a hash table should grow */
struct mlx5dr_ste_htbl_ctrl {
	/* total number of valid entries belonging to this hash table. This
	 * includes the non collision and collision entries
	 */
	unsigned int num_of_valid_entries;

	/* total number of collisions entries attached to this table */
	unsigned int num_of_collisions;
	/* occupancy level above which growing is considered - exact policy
	 * lives in the STE htbl code
	 */
	unsigned int increase_threshold;
	/* set when the table is a candidate for resizing */
	u8 may_grow:1;
};
 148 
/* A hash table of STEs backed by a single ICM chunk */
struct mlx5dr_ste_htbl {
	u8 lu_type;		/* lookup type programmed into the entries */
	u16 byte_mask;
	u32 refcount;		/* users of this table; freed on last put */
	struct mlx5dr_icm_chunk *chunk;	/* ICM memory backing this table */
	struct mlx5dr_ste *ste_arr;	/* SW descriptors, one per entry */
	u8 *hw_ste_arr;			/* HW-format entry array */

	/* per-entry collision (miss) list heads */
	struct list_head *miss_list;

	enum mlx5dr_icm_chunk_size chunk_size;
	/* NOTE(review): presumably the STE whose hit address targets this
	 * table; confirm in dr_ste.c
	 */
	struct mlx5dr_ste *pointing_ste;

	struct mlx5dr_ste_htbl_ctrl ctrl;	/* grow/resize bookkeeping */
};
 164 
/* Describes one pending STE write queued on a caller's send list
 * (see mlx5dr_send_fill_and_append_ste_send_info()).
 */
struct mlx5dr_ste_send_info {
	struct mlx5dr_ste *ste;
	struct list_head send_list;	/* linkage into the caller's send list */
	u16 size;			/* number of bytes to write */
	u16 offset;			/* byte offset within the STE */
	/* inline copy buffer - used when the fill helper is asked to copy */
	u8 data_cont[DR_STE_SIZE];
	u8 *data;			/* data to write; may point at data_cont */
};
 173 
 174 void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
 175                                                u16 offset, u8 *data,
 176                                                struct mlx5dr_ste_send_info *ste_info,
 177                                                struct list_head *send_list,
 178                                                bool copy_data);
 179 
/* One matcher build step: the mask and the callback that fills the tag
 * of a single STE from a rule's match value.
 */
struct mlx5dr_ste_build {
	u8 inner:1;		/* build against inner (encapsulated) headers */
	u8 rx:1;		/* RX direction */
	u8 vhca_id_valid:1;
	struct mlx5dr_domain *dmn;
	struct mlx5dr_cmd_caps *caps;
	u8 lu_type;		/* lookup type for the resulting STE */
	u16 byte_mask;
	u8 bit_mask[DR_STE_SIZE_MASK];
	/* fills hw_ste_p's tag from @spec; returns 0 on success */
	int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
				  struct mlx5dr_ste_build *sb,
				  u8 *hw_ste_p);
};
 193 
 194 struct mlx5dr_ste_htbl *
 195 mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
 196                       enum mlx5dr_icm_chunk_size chunk_size,
 197                       u8 lu_type, u16 byte_mask);
 198 
 199 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
 200 
 201 static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
 202 {
 203         htbl->refcount--;
 204         if (!htbl->refcount)
 205                 mlx5dr_ste_htbl_free(htbl);
 206 }
 207 
 208 static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
 209 {
 210         htbl->refcount++;
 211 }
 212 
 213 /* STE utils */
 214 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
 215 void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
 216 void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
 217                                 struct mlx5dr_ste_htbl *next_htbl);
 218 void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
 219 u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
 220 void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
 221 void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
 222 void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
 223 void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
 224 bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste);
 225 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
 226                                 u8 ste_location);
 227 void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
 228 void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
 229 void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
 230                              int size, bool encap_l3);
 231 void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
 232 void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
 233 void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
 234 void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
 235                                  bool go_back);
 236 void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
 237 u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
 238 void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
 239                                     u32 re_write_index);
 240 void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
 241 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
 242 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
 243 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
 244 
 245 void mlx5dr_ste_free(struct mlx5dr_ste *ste,
 246                      struct mlx5dr_matcher *matcher,
 247                      struct mlx5dr_matcher_rx_tx *nic_matcher);
 248 static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
 249                                   struct mlx5dr_matcher *matcher,
 250                                   struct mlx5dr_matcher_rx_tx *nic_matcher)
 251 {
 252         ste->refcount--;
 253         if (!ste->refcount)
 254                 mlx5dr_ste_free(ste, matcher, nic_matcher);
 255 }
 256 
 257 /* initial as 0, increased only when ste appears in a new rule */
 258 static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
 259 {
 260         ste->refcount++;
 261 }
 262 
 263 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
 264                                           struct mlx5dr_ste_htbl *next_htbl);
 265 bool mlx5dr_ste_equal_tag(void *src, void *dst);
 266 int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
 267                                 struct mlx5dr_matcher_rx_tx *nic_matcher,
 268                                 struct mlx5dr_ste *ste,
 269                                 u8 *cur_hw_ste,
 270                                 enum mlx5dr_icm_chunk_size log_table_size);
 271 
 272 /* STE build functions */
 273 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
 274                                u8 match_criteria,
 275                                struct mlx5dr_match_param *mask,
 276                                struct mlx5dr_match_param *value);
 277 int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
 278                              struct mlx5dr_matcher_rx_tx *nic_matcher,
 279                              struct mlx5dr_match_param *value,
 280                              u8 *ste_arr);
 281 int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
 282                                     struct mlx5dr_match_param *mask,
 283                                     bool inner, bool rx);
 284 void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
 285                                           struct mlx5dr_match_param *mask,
 286                                           bool inner, bool rx);
 287 void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
 288                                        struct mlx5dr_match_param *mask,
 289                                        bool inner, bool rx);
 290 void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
 291                                       struct mlx5dr_match_param *mask,
 292                                       bool inner, bool rx);
 293 void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
 294                                       struct mlx5dr_match_param *mask,
 295                                       bool inner, bool rx);
 296 void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
 297                                  struct mlx5dr_match_param *mask,
 298                                  bool inner, bool rx);
 299 void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
 300                                  struct mlx5dr_match_param *mask,
 301                                  bool inner, bool rx);
 302 void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
 303                                  struct mlx5dr_match_param *mask,
 304                                  bool inner, bool rx);
 305 void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
 306                                  struct mlx5dr_match_param *mask,
 307                                  bool inner, bool rx);
 308 void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
 309                                   struct mlx5dr_match_param *mask,
 310                                   bool inner, bool rx);
 311 void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
 312                           struct mlx5dr_match_param *mask,
 313                           bool inner, bool rx);
 314 void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
 315                            struct mlx5dr_match_param *mask,
 316                            bool inner, bool rx);
 317 void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
 318                                     struct mlx5dr_match_param *mask,
 319                                     bool inner, bool rx);
 320 int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
 321                                    struct mlx5dr_match_param *mask,
 322                                    struct mlx5dr_cmd_caps *caps,
 323                                    bool inner, bool rx);
 324 void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
 325                                       struct mlx5dr_match_param *mask,
 326                                       bool inner, bool rx);
 327 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
 328                                       struct mlx5dr_match_param *mask,
 329                                       bool inner, bool rx);
 330 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
 331                                  struct mlx5dr_match_param *mask,
 332                                  bool inner, bool rx);
 333 void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
 334                                  struct mlx5dr_match_param *mask,
 335                                  bool inner, bool rx);
 336 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
 337                                   struct mlx5dr_match_param *mask,
 338                                   struct mlx5dr_domain *dmn,
 339                                   bool inner, bool rx);
 340 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
 341 
 342 /* Actions utils */
 343 int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 344                                  struct mlx5dr_matcher_rx_tx *nic_matcher,
 345                                  struct mlx5dr_action *actions[],
 346                                  u32 num_actions,
 347                                  u8 *ste_arr,
 348                                  u32 *new_hw_ste_arr_sz);
 349 
/* Packet-header match fields (used for both outer and inner headers).
 * NOTE(review): field order presumably mirrors the device match-parameter
 * layout in mlx5_ifc - verify before reordering anything.
 */
struct mlx5dr_match_spec {
	u32 smac_47_16;		/* Source MAC address of incoming packet */
	/* Incoming packet Ethertype - this is the Ethertype
	 * following the last VLAN tag of the packet
	 */
	u32 ethertype:16;
	u32 smac_15_0:16;	/* Source MAC address of incoming packet */
	u32 dmac_47_16;		/* Destination MAC address of incoming packet */
	/* VLAN ID of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_vid:12;
	/* CFI bit of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_cfi:1;
	/* Priority of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_prio:3;
	u32 dmac_15_0:16;	/* Destination MAC address of incoming packet */
	/* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
	 *             Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
	 */
	u32 tcp_flags:9;
	u32 ip_version:4;	/* IP version */
	u32 frag:1;		/* Packet is an IP fragment */
	/* The first vlan in the packet is s-vlan (0x8a88).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 svlan_tag:1;
	/* The first vlan in the packet is c-vlan (0x8100).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 cvlan_tag:1;
	/* Explicit Congestion Notification derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_ecn:2;
	/* Differentiated Services Code Point derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_dscp:6;
	u32 ip_protocol:8;	/* IP protocol */
	/* TCP destination port.
	 * tcp and udp sport/dport are mutually exclusive
	 */
	u32 tcp_dport:16;
	/* TCP source port.;tcp and udp sport/dport are mutually exclusive */
	u32 tcp_sport:16;
	u32 ttl_hoplimit:8;
	u32 reserved:24;
	/* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
	u32 udp_dport:16;
	/* UDP source port.;tcp and udp sport/dport are mutually exclusive */
	u32 udp_sport:16;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_127_96;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_95_64;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_63_32;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_31_0;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_127_96;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_95_64;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_63_32;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_31_0;
};
 447 
/* Miscellaneous match fields: source port/SQN, second VLAN tags, GRE,
 * VXLAN, GENEVE and BTH. NOTE(review): layout presumably mirrors the
 * device misc match parameters - verify before reordering.
 */
struct mlx5dr_match_misc {
	u32 source_sqn:24;		/* Source SQN */
	u32 source_vhca_port:4;
	/* used with GRE, sequence number exist when gre_s_present == 1 */
	u32 gre_s_present:1;
	/* used with GRE, key exist when gre_k_present == 1 */
	u32 gre_k_present:1;
	u32 reserved_auto1:1;
	/* used with GRE, checksum exist when gre_c_present == 1 */
	u32 gre_c_present:1;
	/* Source port.;0xffff determines wire port */
	u32 source_port:16;
	u32 source_eswitch_owner_vhca_id:16;
	/* VLAN ID of first VLAN tag the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_vid:12;
	/* CFI bit of first VLAN tag in the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_cfi:1;
	/* Priority of second VLAN tag in the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_prio:3;
	/* VLAN ID of first VLAN tag the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_vid:12;
	/* CFI bit of first VLAN tag in the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_cfi:1;
	/* Priority of second VLAN tag in the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_prio:3;
	u32 gre_protocol:16;		/* GRE Protocol (outer) */
	u32 reserved_auto3:12;
	/* The second vlan in the inner header of the packet is s-vlan (0x8a88).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_svlan_tag:1;
	/* The second vlan in the outer header of the packet is s-vlan (0x8a88).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_svlan_tag:1;
	/* The second vlan in the inner header of the packet is c-vlan (0x8100).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_cvlan_tag:1;
	/* The second vlan in the outer header of the packet is c-vlan (0x8100).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_cvlan_tag:1;
	u32 gre_key_l:8;		/* GRE Key [7:0] (outer) */
	u32 gre_key_h:24;		/* GRE Key[31:8] (outer) */
	u32 reserved_auto4:8;
	u32 vxlan_vni:24;		/* VXLAN VNI (outer) */
	u32 geneve_oam:1;		/* GENEVE OAM field (outer) */
	u32 reserved_auto5:7;
	u32 geneve_vni:24;		/* GENEVE VNI field (outer) */
	u32 outer_ipv6_flow_label:20;	/* Flow label of incoming IPv6 packet (outer) */
	u32 reserved_auto6:12;
	u32 inner_ipv6_flow_label:20;	/* Flow label of incoming IPv6 packet (inner) */
	u32 reserved_auto7:12;
	u32 geneve_protocol_type:16;	/* GENEVE protocol type (outer) */
	u32 geneve_opt_len:6;		/* GENEVE OptLen (outer) */
	u32 reserved_auto8:10;
	u32 bth_dst_qp:24;		/* Destination QP in BTH header */
	u32 reserved_auto9:8;
	u8 reserved_auto10[20];
};
 521 
/* Second miscellaneous match group: MPLS labels and metadata registers.
 * NOTE(review): layout presumably mirrors the device misc2 match
 * parameters - verify before reordering.
 */
struct mlx5dr_match_misc2 {
	u32 outer_first_mpls_ttl:8;		/* First MPLS TTL (outer) */
	u32 outer_first_mpls_s_bos:1;		/* First MPLS S_BOS (outer) */
	u32 outer_first_mpls_exp:3;		/* First MPLS EXP (outer) */
	u32 outer_first_mpls_label:20;		/* First MPLS LABEL (outer) */
	u32 inner_first_mpls_ttl:8;		/* First MPLS TTL (inner) */
	u32 inner_first_mpls_s_bos:1;		/* First MPLS S_BOS (inner) */
	u32 inner_first_mpls_exp:3;		/* First MPLS EXP (inner) */
	u32 inner_first_mpls_label:20;		/* First MPLS LABEL (inner) */
	u32 outer_first_mpls_over_gre_ttl:8;	/* last MPLS TTL (outer) */
	u32 outer_first_mpls_over_gre_s_bos:1;	/* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_gre_exp:3;	/* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_gre_label:20;	/* last MPLS LABEL (outer) */
	u32 outer_first_mpls_over_udp_ttl:8;	/* last MPLS TTL (outer) */
	u32 outer_first_mpls_over_udp_s_bos:1;	/* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_udp_exp:3;	/* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_udp_label:20;	/* last MPLS LABEL (outer) */
	u32 metadata_reg_c_7;			/* metadata_reg_c_7 */
	u32 metadata_reg_c_6;			/* metadata_reg_c_6 */
	u32 metadata_reg_c_5;			/* metadata_reg_c_5 */
	u32 metadata_reg_c_4;			/* metadata_reg_c_4 */
	u32 metadata_reg_c_3;			/* metadata_reg_c_3 */
	u32 metadata_reg_c_2;			/* metadata_reg_c_2 */
	u32 metadata_reg_c_1;			/* metadata_reg_c_1 */
	u32 metadata_reg_c_0;			/* metadata_reg_c_0 */
	u32 metadata_reg_a;			/* metadata_reg_a */
	u32 metadata_reg_b;			/* metadata_reg_b */
	u8 reserved_auto2[8];
};
 551 
/* Third miscellaneous match group: TCP seq/ack, VXLAN-GPE and ICMP.
 * NOTE(review): layout presumably mirrors the device misc3 match
 * parameters - verify before reordering.
 */
struct mlx5dr_match_misc3 {
	u32 inner_tcp_seq_num;			/* TCP sequence number (inner) */
	u32 outer_tcp_seq_num;			/* TCP sequence number (outer) */
	u32 inner_tcp_ack_num;			/* TCP ack number (inner) */
	u32 outer_tcp_ack_num;			/* TCP ack number (outer) */
	u32 outer_vxlan_gpe_vni:24;		/* VXLAN-GPE VNI (outer) */
	u32 reserved_auto1:8;
	u32 reserved_auto2:16;
	u32 outer_vxlan_gpe_flags:8;		/* VXLAN-GPE flags (outer) */
	u32 outer_vxlan_gpe_next_protocol:8;	/* VXLAN-GPE next protocol (outer) */
	u32 icmpv4_header_data;			/* ICMPv4 rest-of-header data */
	u32 icmpv6_header_data;			/* ICMPv6 rest-of-header data */
	u32 icmpv6_code:8;
	u32 icmpv6_type:8;
	u32 icmpv4_code:8;
	u32 icmpv4_type:8;
	u8 reserved_auto3[0x1c];
};
 570 
/* Full match parameter set a matcher/rule operates on; which sections are
 * meaningful is selected by the DR_MATCHER_CRITERIA_* bitmask.
 */
struct mlx5dr_match_param {
	struct mlx5dr_match_spec outer;		/* outer header fields */
	struct mlx5dr_match_misc misc;
	struct mlx5dr_match_spec inner;		/* inner (encapsulated) header fields */
	struct mlx5dr_match_misc2 misc2;
	struct mlx5dr_match_misc3 misc3;
};
 578 
/* True when the mask requires ICMPv4 matching (any of type, code or
 * header data is set), which is done via the flex parser.
 */
#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
						   (_misc3)->icmpv4_code || \
						   (_misc3)->icmpv4_header_data)
 582 
/* E-switch (FDB) capabilities: default drop/uplink ICM addresses and
 * whether SW steering owns the FDB.
 */
struct mlx5dr_esw_caps {
	u64 drop_icm_address_rx;
	u64 drop_icm_address_tx;
	u64 uplink_icm_address_rx;
	u64 uplink_icm_address_tx;
	bool sw_owner;
};
 590 
/* Per-vport capabilities used for vport actions and source matching */
struct mlx5dr_cmd_vport_cap {
	u16 vport_gvmi;		/* gvmi of the vport */
	u16 vhca_gvmi;		/* gvmi of the owning vhca */
	u64 icm_address_rx;	/* RX default ICM address of the vport */
	u64 icm_address_tx;	/* TX default ICM address of the vport */
	u32 num;		/* vport number */
};
 598 
/* Device/FW capabilities queried through mlx5 commands; consumed when
 * creating domains, matchers and actions.
 */
struct mlx5dr_cmd_caps {
	u16 gvmi;
	u64 nic_rx_drop_address;
	u64 nic_tx_drop_address;
	u64 nic_tx_allow_address;
	u64 esw_rx_drop_address;
	u64 esw_tx_drop_address;
	u32 log_icm_size;
	u64 hdr_modify_icm_addr;
	/* bitmap of supported flex parser protocols */
	u32 flex_protocols;
	/* flex parser ids used for ICMPv4/v6 matching */
	u8 flex_parser_id_icmp_dw0;
	u8 flex_parser_id_icmp_dw1;
	u8 flex_parser_id_icmpv6_dw0;
	u8 flex_parser_id_icmpv6_dw1;
	u8 max_ft_level;
	u16 roce_min_src_udp;
	u8 num_esw_ports;
	bool eswitch_manager;
	/* per-table-type SW steering ownership flags */
	bool rx_sw_owner;
	bool tx_sw_owner;
	bool fdb_sw_owner;
	u32 num_vports;
	struct mlx5dr_esw_caps esw_caps;
	/* array of num_vports entries - NOTE(review): verify indexing/size
	 * against the cmd code
	 */
	struct mlx5dr_cmd_vport_cap *vports_caps;
	bool prio_tag_required;
};
 625 
/* Per-direction (RX or TX) domain defaults */
struct mlx5dr_domain_rx_tx {
	u64 drop_icm_addr;	/* ICM address to jump to on drop */
	u64 default_icm_addr;	/* default miss ICM address */
	enum mlx5dr_ste_entry_type ste_type;
};
 631 
/* Aggregated domain properties: limits, per-direction defaults and caps */
struct mlx5dr_domain_info {
	bool supp_sw_steering;	/* SW steering is supported for this domain */
	u32 max_inline_size;
	u32 max_send_wr;
	u32 max_log_sw_icm_sz;
	u32 max_log_action_icm_sz;
	struct mlx5dr_domain_rx_tx rx;
	struct mlx5dr_domain_rx_tx tx;
	struct mlx5dr_cmd_caps caps;
};
 642 
/* Per-domain cache of FW recalc-checksum flow tables */
struct mlx5dr_domain_cache {
	struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
};
 646 
/* A SW steering domain: per-device context owning the ICM pools, the
 * send ring and all tables/matchers/rules created under it.
 */
struct mlx5dr_domain {
	struct mlx5dr_domain *peer_dmn;		/* peer domain, e.g. for cross-domain setups - NOTE(review): verify usage */
	struct mlx5_core_dev *mdev;
	u32 pdn;				/* protection domain number */
	struct mlx5_uars_page *uar;
	enum mlx5dr_domain_type type;
	refcount_t refcount;
	struct mutex mutex; /* protect domain */
	struct mlx5dr_icm_pool *ste_icm_pool;		/* ICM memory for STEs */
	struct mlx5dr_icm_pool *action_icm_pool;	/* ICM memory for modify-header actions */
	struct mlx5dr_send_ring *send_ring;
	struct mlx5dr_domain_info info;
	struct mlx5dr_domain_cache cache;
};
 661 
/* Per-direction state of a table */
struct mlx5dr_table_rx_tx {
	struct mlx5dr_ste_htbl *s_anchor;	/* table entry-point hash table */
	struct mlx5dr_domain_rx_tx *nic_dmn;	/* direction defaults of the owning domain */
	u64 default_icm_addr;
};
 667 
/* A SW steering flow table holding an ordered list of matchers */
struct mlx5dr_table {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_table_rx_tx rx;
	struct mlx5dr_table_rx_tx tx;
	u32 level;
	u32 table_type;
	u32 table_id;
	struct list_head matcher_list;		/* matchers created on this table */
	struct mlx5dr_action *miss_action;	/* action taken when no matcher hits */
	refcount_t refcount;
};
 679 
/* Per-direction matcher state: entry/anchor tables and STE builders */
struct mlx5dr_matcher_rx_tx {
	struct mlx5dr_ste_htbl *s_htbl;		/* matcher start hash table */
	struct mlx5dr_ste_htbl *e_anchor;	/* matcher end anchor table */
	/* active builder array; points at ste_builder4 or ste_builder6 -
	 * NOTE(review): presumably selected by rule IP version, verify
	 */
	struct mlx5dr_ste_build *ste_builder;
	struct mlx5dr_ste_build ste_builder4[DR_RULE_MAX_STES];	/* IPv4 builders */
	struct mlx5dr_ste_build ste_builder6[DR_RULE_MAX_STES];	/* IPv6 builders */
	u8 num_of_builders;
	u8 num_of_builders4;
	u8 num_of_builders6;
	u64 default_icm_addr;
	struct mlx5dr_table_rx_tx *nic_tbl;
};
 692 
/* A matcher: mask + priority within a table; rules are created on it */
struct mlx5dr_matcher {
	struct mlx5dr_table *tbl;
	struct mlx5dr_matcher_rx_tx rx;
	struct mlx5dr_matcher_rx_tx tx;
	struct list_head matcher_list;	/* linkage in tbl->matcher_list */
	u16 prio;			/* priority among the table's matchers */
	struct mlx5dr_match_param mask;	/* which bits rules on this matcher match */
	u8 match_criteria;		/* DR_MATCHER_CRITERIA_* bitmask */
	refcount_t refcount;
	struct mlx5dv_flow_matcher *dv_matcher;
};
 704 
/* Links one STE to one rule (a rule owns a list of these) */
struct mlx5dr_rule_member {
	struct mlx5dr_ste *ste;
	/* attached to mlx5dr_rule via this */
	struct list_head list;
	/* attached to mlx5dr_ste via this */
	struct list_head use_ste_list;
};
 712 
/* A steering action; the union member in effect is selected by
 * action_type.
 */
struct mlx5dr_action {
	enum mlx5dr_action_type action_type;
	refcount_t refcount;
	union {
		/* modify-header (rewrite) action */
		struct {
			struct mlx5dr_domain *dmn;
			struct mlx5dr_icm_chunk *chunk;	/* ICM holding the actions */
			u8 *data;			/* action data buffer */
			u32 data_size;			/* size of data, in bytes */
			u16 num_of_actions;
			u32 index;
			u8 allow_rx:1;
			u8 allow_tx:1;
			u8 modify_ttl:1;
		} rewrite;
		/* packet reformat (encap/decap) action */
		struct {
			struct mlx5dr_domain *dmn;
			u32 reformat_id;	/* FW reformat context id */
			u32 reformat_size;
		} reformat;
		/* forward-to-table action: either a SW-steering table or a
		 * FW-owned flow table
		 */
		struct {
			u8 is_fw_tbl:1;
			union {
				struct mlx5dr_table *tbl;
				struct {
					struct mlx5_flow_table *ft;
					u64 rx_icm_addr;
					u64 tx_icm_addr;
					struct mlx5_core_dev *mdev;
				} fw_tbl;
			};
		} dest_tbl;
		/* flow counter action */
		struct {
			u32 ctr_id;
			u32 offeset;	/* [sic] "offset"; field name kept — renaming would break users of this header */
		} ctr;
		/* forward-to-vport action */
		struct {
			struct mlx5dr_domain *dmn;
			struct mlx5dr_cmd_vport_cap *caps;
		} vport;
		struct {
			u32 vlan_hdr; /* tpid_pcp_dei_vid */
		} push_vlan;
		u32 flow_tag;
	};
};
 759 
/* How a hash table's miss/hit path is connected onward. */
enum mlx5dr_connect_type {
	CONNECT_HIT	= 1,	/* connect to a next hash table */
	CONNECT_MISS	= 2,	/* connect to a raw miss ICM address */
};

/* Connection target; the union member in effect is selected by type. */
struct mlx5dr_htbl_connect_info {
	enum mlx5dr_connect_type type;
	union {
		struct mlx5dr_ste_htbl *hit_next_htbl;	/* valid for CONNECT_HIT */
		u64 miss_icm_addr;			/* valid for CONNECT_MISS */
	};
};
 772 
/* Per-direction rule state: the STE members composing the rule. */
struct mlx5dr_rule_rx_tx {
	struct list_head rule_members_list;	/* list of mlx5dr_rule_member */
	struct mlx5dr_matcher_rx_tx *nic_matcher;
};

/* A rule instance under a matcher, with its RX/TX STE chains and the
 * actions attached to it.
 */
struct mlx5dr_rule {
	struct mlx5dr_matcher *matcher;
	struct mlx5dr_rule_rx_tx rx;
	struct mlx5dr_rule_rx_tx tx;
	struct list_head rule_actions_list;
};

/* Repoint rule members referencing ste to new_ste (e.g. after rehash). */
void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
				    struct mlx5dr_ste *ste);
 787 
/* A chunk of device ICM memory allocated from an ICM pool, with the
 * host-side shadow arrays describing the STEs it holds.
 */
struct mlx5dr_icm_chunk {
	struct mlx5dr_icm_bucket *bucket;	/* pool bucket it came from */
	struct list_head chunk_list;
	u32 rkey;		/* rkey for RDMA access to the chunk */
	u32 num_of_entries;
	u32 byte_size;
	u64 icm_addr;		/* device ICM address */
	u64 mr_addr;		/* MR-mapped address */

	/* Memory optimisation */
	struct mlx5dr_ste *ste_arr;	/* shadow STE structs, one per entry */
	u8 *hw_ste_arr;			/* host copy of the HW STE contents */
	struct list_head *miss_list;	/* per-entry collision/miss lists */
};
 802 
 803 static inline int
 804 mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
 805 {
 806         return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
 807 }
 808 
 809 static inline int
 810 mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
 811 {
 812         return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
 813 }
 814 
/* Select the IPv4 or IPv6 STE builder array for nic_matcher. */
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
				   struct mlx5dr_matcher_rx_tx *nic_matcher,
				   bool ipv6);
 818 
 819 static inline u32
 820 mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
 821 {
 822         return 1 << chunk_size;
 823 }
 824 
 825 static inline int
 826 mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
 827                                    enum mlx5dr_icm_type icm_type)
 828 {
 829         int num_of_entries;
 830         int entry_size;
 831 
 832         if (icm_type == DR_ICM_TYPE_STE)
 833                 entry_size = DR_STE_SIZE;
 834         else
 835                 entry_size = DR_MODIFY_ACTION_SIZE;
 836 
 837         num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
 838 
 839         return entry_size * num_of_entries;
 840 }
 841 
 842 static inline struct mlx5dr_cmd_vport_cap *
 843 mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
 844 {
 845         if (!caps->vports_caps ||
 846             (vport >= caps->num_vports && vport != WIRE_PORT))
 847                 return NULL;
 848 
 849         if (vport == WIRE_PORT)
 850                 vport = caps->num_vports;
 851 
 852         return &caps->vports_caps[vport];
 853 }
 854 
/* Output of mlx5dr_cmd_query_flow_table(). */
struct mlx5dr_cmd_query_flow_table_details {
	u8 status;
	u8 level;
	u64 sw_owner_icm_root_1;
	u64 sw_owner_icm_root_0;
};
 861 
/* internal API functions */

/* Device/ESW capability queries. */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
			    struct mlx5dr_cmd_caps *caps);
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport, u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx);
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
			  bool other_vport, u16 vport_number, u16 *gvmi);
int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
			      struct mlx5dr_esw_caps *caps);

/* Flush pending steering writes to HW. */
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);

/* Flow-table-entry / modify-header FW commands. */
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u32 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id);
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id);
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id);

/* Flow-group / flow-table lifecycle FW commands. */
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id);
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id);
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 u32 table_type,
				 u64 icm_addr_rx,
				 u64 icm_addr_tx,
				 u8 level,
				 bool sw_owner,
				 bool term_tbl,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id);
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type);
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output);

/* Packet reformat (encap/decap) context FW commands. */
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id);
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id);
 921 
/* GID table entry attributes returned by mlx5dr_cmd_query_gid(). */
struct mlx5dr_cmd_gid_attr {
	u8 gid[16];
	u8 mac[6];
	u32 roce_ver;
};

/* Parameters for creating the send-ring QP. */
struct mlx5dr_cmd_qp_create_attr {
	u32 page_id;
	u32 pdn;		/* protection domain number */
	u32 cqn;		/* completion queue number */
	u32 pm_state;
	u32 service_type;
	u32 buff_umem_id;
	u32 db_umem_id;		/* doorbell umem id */
	u32 sq_wqe_cnt;
	u32 rq_wqe_cnt;
	u32 rq_wqe_shift;
};
 940 
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr);

/* ICM pool lifecycle and chunk allocation. */
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);

struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);

/* STE hash-table construction/initialization helpers. */
bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste);
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
				  struct mlx5dr_domain_rx_tx *nic_dmn,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info);
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask);

/* CRC32 used for STE hashing. */
void mlx5dr_crc32_init_table(void);
u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length);
 969 
/* QP used by the send ring to post steering writes to the device. */
struct mlx5dr_qp {
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_qp wq;
	struct mlx5_uars_page *uar;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_qp mqp;
	struct {
		unsigned int pc;	/* producer counter */
		unsigned int cc;	/* consumer counter */
		unsigned int size;
		unsigned int *wqe_head;
		unsigned int wqe_cnt;
	} sq;
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int size;
		unsigned int wqe_cnt;
	} rq;
	int max_inline_data;
};
 991 
/* Completion queue paired with the send-ring QP. */
struct mlx5dr_cq {
	struct mlx5_core_dev *mdev;
	struct mlx5_cqwq wq;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_cq mcq;
	struct mlx5dr_qp *qp;
};

/* Memory region registered for send-ring data transfers. */
struct mlx5dr_mr {
	struct mlx5_core_dev *mdev;
	struct mlx5_core_mkey mkey;
	dma_addr_t dma_addr;
	void *addr;
	size_t size;
};
1007 
#define MAX_SEND_CQE		64	/* max CQEs polled per completion pass */
#define MIN_READ_SYNC		64	/* size of the sync read buffer */

/* Send ring used to write STEs/actions to device ICM over RDMA. */
struct mlx5dr_send_ring {
	struct mlx5dr_cq *cq;
	struct mlx5dr_qp *qp;
	struct mlx5dr_mr *mr;
	/* Number of WQEs still awaiting completion */
	u32 pending_wqe;
	/* Request a signaled completion every signal_th WQEs (threshold) */
	u16 signal_th;
	/* Each post_send_size less than max_post_send_size */
	u32 max_post_send_size;
	/* manage the send queue */
	u32 tx_head;
	void *buf;
	u32 buf_size;
	struct ib_wc wc[MAX_SEND_CQE];
	u8 sync_buff[MIN_READ_SYNC];	/* target buffer for sync reads */
	struct mlx5dr_mr *sync_mr;
};
1029 
/* Send-ring lifecycle and post-send operations. */
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
			   struct mlx5dr_send_ring *send_ring);
/* Wait for all outstanding WQEs to complete. */
int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
/* Write (part of) a single STE to the device. */
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
			     struct mlx5dr_ste *ste,
			     u8 *data,
			     u16 size,
			     u16 offset);
/* Write a whole hash table's STEs to the device. */
int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
			      struct mlx5dr_ste_htbl *htbl,
			      u8 *formatted_ste, u8 *mask);
int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
					struct mlx5dr_ste_htbl *htbl,
					u8 *ste_init_data,
					bool update_hw_ste);
/* Write an action's data (e.g. modify-header) to the device. */
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
				struct mlx5dr_action *action);
1048 
/* FW flow table used to recalculate packet checksums for a vport. */
struct mlx5dr_fw_recalc_cs_ft {
	u64 rx_icm_addr;
	u32 table_id;
	u32 group_id;
	u32 modify_hdr_id;
};

struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
				    struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
/* Get (from cache, creating on demand) the recalc-cs table RX ICM address. */
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr);
#endif	/* _DR_TYPES_ */

/* [<][>][^][v][top][bottom][index][help] */