root/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. mlx5dr_ste_calc_hash_index
  2. dr_ste_conv_bit_to_byte_mask
  3. mlx5dr_ste_set_bit_mask
  4. mlx5dr_ste_rx_set_flow_tag
  5. mlx5dr_ste_set_counter_id
  6. mlx5dr_ste_set_go_back_bit
  7. mlx5dr_ste_set_tx_push_vlan
  8. mlx5dr_ste_set_tx_encap
  9. mlx5dr_ste_set_rx_decap
  10. mlx5dr_ste_set_rx_pop_vlan
  11. mlx5dr_ste_set_rx_decap_l3
  12. mlx5dr_ste_set_entry_type
  13. mlx5dr_ste_get_entry_type
  14. mlx5dr_ste_set_rewrite_actions
  15. mlx5dr_ste_set_hit_gvmi
  16. mlx5dr_ste_init
  17. dr_ste_set_always_hit
  18. dr_ste_set_always_miss
  19. mlx5dr_ste_get_miss_addr
  20. mlx5dr_ste_set_hit_addr
  21. mlx5dr_ste_get_icm_addr
  22. mlx5dr_ste_get_mr_addr
  23. mlx5dr_ste_get_miss_list
  24. dr_ste_always_hit_htbl
  25. mlx5dr_ste_is_last_in_rule
  26. dr_ste_replace
  27. dr_ste_remove_head_ste
  28. dr_ste_replace_head_ste
  29. dr_ste_remove_middle_ste
  30. mlx5dr_ste_free
  31. mlx5dr_ste_equal_tag
  32. mlx5dr_ste_set_hit_addr_by_next_htbl
  33. mlx5dr_ste_set_miss_addr
  34. mlx5dr_ste_always_miss_addr
  35. mlx5dr_ste_is_not_valid_entry
  36. mlx5dr_ste_not_used_ste
  37. mlx5dr_ste_set_formatted_ste
  38. mlx5dr_ste_htbl_init_and_postsend
  39. mlx5dr_ste_create_next_htbl
  40. dr_ste_set_ctrl
  41. mlx5dr_ste_htbl_alloc
  42. mlx5dr_ste_htbl_free
  43. mlx5dr_ste_build_pre_check
  44. mlx5dr_ste_build_ste_arr
  45. dr_ste_build_eth_l2_src_des_bit_mask
  46. dr_ste_copy_mask_misc
  47. dr_ste_copy_mask_spec
  48. dr_ste_copy_mask_misc2
  49. dr_ste_copy_mask_misc3
  50. mlx5dr_ste_copy_param
  51. dr_ste_build_eth_l2_src_des_tag
  52. mlx5dr_ste_build_eth_l2_src_des
  53. dr_ste_build_eth_l3_ipv6_dst_bit_mask
  54. dr_ste_build_eth_l3_ipv6_dst_tag
  55. mlx5dr_ste_build_eth_l3_ipv6_dst
  56. dr_ste_build_eth_l3_ipv6_src_bit_mask
  57. dr_ste_build_eth_l3_ipv6_src_tag
  58. mlx5dr_ste_build_eth_l3_ipv6_src
  59. dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask
  60. dr_ste_build_eth_l3_ipv4_5_tuple_tag
  61. mlx5dr_ste_build_eth_l3_ipv4_5_tuple
  62. dr_ste_build_eth_l2_src_or_dst_bit_mask
  63. dr_ste_build_eth_l2_src_or_dst_tag
  64. dr_ste_build_eth_l2_src_bit_mask
  65. dr_ste_build_eth_l2_src_tag
  66. mlx5dr_ste_build_eth_l2_src
  67. dr_ste_build_eth_l2_dst_bit_mask
  68. dr_ste_build_eth_l2_dst_tag
  69. mlx5dr_ste_build_eth_l2_dst
  70. dr_ste_build_eth_l2_tnl_bit_mask
  71. dr_ste_build_eth_l2_tnl_tag
  72. mlx5dr_ste_build_eth_l2_tnl
  73. dr_ste_build_eth_l3_ipv4_misc_bit_mask
  74. dr_ste_build_eth_l3_ipv4_misc_tag
  75. mlx5dr_ste_build_eth_l3_ipv4_misc
  76. dr_ste_build_ipv6_l3_l4_bit_mask
  77. dr_ste_build_ipv6_l3_l4_tag
  78. mlx5dr_ste_build_ipv6_l3_l4
  79. dr_ste_build_empty_always_hit_tag
  80. mlx5dr_ste_build_empty_always_hit
  81. dr_ste_build_mpls_bit_mask
  82. dr_ste_build_mpls_tag
  83. mlx5dr_ste_build_mpls
  84. dr_ste_build_gre_bit_mask
  85. dr_ste_build_gre_tag
  86. mlx5dr_ste_build_gre
  87. dr_ste_build_flex_parser_0_bit_mask
  88. dr_ste_build_flex_parser_0_tag
  89. mlx5dr_ste_build_flex_parser_0
  90. dr_ste_build_flex_parser_1_bit_mask
  91. dr_ste_build_flex_parser_1_tag
  92. mlx5dr_ste_build_flex_parser_1
  93. dr_ste_build_general_purpose_bit_mask
  94. dr_ste_build_general_purpose_tag
  95. mlx5dr_ste_build_general_purpose
  96. dr_ste_build_eth_l4_misc_bit_mask
  97. dr_ste_build_eth_l4_misc_tag
  98. mlx5dr_ste_build_eth_l4_misc
  99. dr_ste_build_flex_parser_tnl_bit_mask
  100. dr_ste_build_flex_parser_tnl_tag
  101. mlx5dr_ste_build_flex_parser_tnl
  102. dr_ste_build_register_0_bit_mask
  103. dr_ste_build_register_0_tag
  104. mlx5dr_ste_build_register_0
  105. dr_ste_build_register_1_bit_mask
  106. dr_ste_build_register_1_tag
  107. mlx5dr_ste_build_register_1
  108. dr_ste_build_src_gvmi_qpn_bit_mask
  109. dr_ste_build_src_gvmi_qpn_tag
  110. mlx5dr_ste_build_src_gvmi_qpn

   1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2 /* Copyright (c) 2019 Mellanox Technologies. */
   3 
   4 #include <linux/types.h>
   5 #include "dr_types.h"
   6 
/* Reflected CRC32 polynomial; the STE hash is a CRC32 over the masked tag
 * (computed via mlx5dr_crc32_slice8_calc in mlx5dr_ste_calc_hash_index).
 */
#define DR_STE_CRC_POLY 0xEDB88320L
/* Protocol / version encodings written into STE match fields */
#define STE_IPV4 0x1
#define STE_IPV6 0x2
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_SPI 0x3
#define IP_VERSION_IPV4 0x4
#define IP_VERSION_IPV6 0x6
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2

/* Set in qp_list_pointer to enable the flow-tag action (see
 * mlx5dr_ste_rx_set_flow_tag).
 */
#define DR_STE_ENABLE_FLOW_TAG BIT(31)

/* Set to STE a specific value using DR_STE_SET.
 * The spec field is consumed (zeroed) so later code can detect
 * unsupported leftover match fields.
 */
#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
        if ((spec)->s_fname) { \
                MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
                (spec)->s_fname = 0; \
        } \
} while (0)

/* Set to STE spec->s_fname to tag->t_fname */
#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
        DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)

/* Set to STE -1 to bit_mask->bm_fname and set spec->s_fname as used */
#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
        DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)

/* Set to STE spec->s_fname to bit_mask->bm_fname and set spec->s_fname as used */
#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
        DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)

/* Expand the nine TCP flag bits of spec->tcp_flags (NS..FIN, high to low)
 * into the individual per-flag STE fields.
 */
#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
        MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
        MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
        MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
        MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
        MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
        MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
        MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
        MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
        MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
} while (0)

/* Copy the first-MPLS-label fields from "mask" into the STE bit mask.
 * NOTE(review): the bit_mask parameter is unused; "mask" serves as both
 * source spec and destination here - confirm this is intentional.
 */
#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
        DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
                          in_out##_first_mpls_label);\
        DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
                          in_out##_first_mpls_s_bos); \
        DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
                          in_out##_first_mpls_exp); \
        DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
                          in_out##_first_mpls_ttl); \
} while (0)

/* Copy the first-MPLS-label fields from "mask" into the STE tag */
#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
        DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
                       in_out##_first_mpls_label);\
        DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
                       in_out##_first_mpls_s_bos); \
        DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
                       in_out##_first_mpls_exp); \
        DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
                       in_out##_first_mpls_ttl); \
} while (0)

/* True when any outer MPLS-over-GRE match field is requested */
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
        (_misc)->outer_first_mpls_over_gre_label || \
        (_misc)->outer_first_mpls_over_gre_exp || \
        (_misc)->outer_first_mpls_over_gre_s_bos || \
        (_misc)->outer_first_mpls_over_gre_ttl)
/* True when any outer MPLS-over-UDP match field is requested */
#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
        (_misc)->outer_first_mpls_over_udp_label || \
        (_misc)->outer_first_mpls_over_udp_exp || \
        (_misc)->outer_first_mpls_over_udp_s_bos || \
        (_misc)->outer_first_mpls_over_udp_ttl)

/* Pick the lookup-type variant: _I for inner headers, otherwise _D on RX
 * and _O on TX (suffix semantics defined by the MLX5DR_STE_LU_TYPE_* enum).
 */
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
        ((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
                   (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
                          MLX5DR_STE_LU_TYPE_##lookup_type##_O)
  89 
/* Encodings written into the STE tunneling_action field (RX side) */
enum dr_ste_tunl_action {
        DR_STE_TUNL_ACTION_NONE         = 0,
        DR_STE_TUNL_ACTION_ENABLE       = 1,
        DR_STE_TUNL_ACTION_DECAP        = 2,
        DR_STE_TUNL_ACTION_L3_DECAP     = 3,
        DR_STE_TUNL_ACTION_POP_VLAN     = 4,
};

/* Encodings written into the TX (sx_transmit) STE action_type field */
enum dr_ste_action_type {
        DR_STE_ACTION_TYPE_PUSH_VLAN    = 1,
        DR_STE_ACTION_TYPE_ENCAP_L3     = 3,
        DR_STE_ACTION_TYPE_ENCAP        = 4,
};

/* Logical layout of a full STE: control area, then match tag, then bit mask.
 * Note that the "reduced" STE copies (DR_STE_SIZE_REDUCED) used elsewhere in
 * this file do not include the trailing mask area.
 */
struct dr_hw_ste_format {
        u8 ctrl[DR_STE_SIZE_CTRL];
        u8 tag[DR_STE_SIZE_TAG];
        u8 mask[DR_STE_SIZE_MASK];
};
 109 
 110 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
 111 {
 112         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
 113         u8 masked[DR_STE_SIZE_TAG] = {};
 114         u32 crc32, index;
 115         u16 bit;
 116         int i;
 117 
 118         /* Don't calculate CRC if the result is predicted */
 119         if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
 120                 return 0;
 121 
 122         /* Mask tag using byte mask, bit per byte */
 123         bit = 1 << (DR_STE_SIZE_TAG - 1);
 124         for (i = 0; i < DR_STE_SIZE_TAG; i++) {
 125                 if (htbl->byte_mask & bit)
 126                         masked[i] = hw_ste->tag[i];
 127 
 128                 bit = bit >> 1;
 129         }
 130 
 131         crc32 = mlx5dr_crc32_slice8_calc(masked, DR_STE_SIZE_TAG);
 132         index = crc32 & (htbl->chunk->num_of_entries - 1);
 133 
 134         return index;
 135 }
 136 
 137 static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
 138 {
 139         u16 byte_mask = 0;
 140         int i;
 141 
 142         for (i = 0; i < DR_STE_SIZE_MASK; i++) {
 143                 byte_mask = byte_mask << 1;
 144                 if (bit_mask[i] == 0xff)
 145                         byte_mask |= 1;
 146         }
 147         return byte_mask;
 148 }
 149 
/* Copy @bit_mask into the mask area of a full-sized STE */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

        memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
 156 
/* Program a flow tag on an RX STE; the enable bit lives in the same
 * qp_list_pointer field as the tag value itself.
 */
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
                 DR_STE_ENABLE_FLOW_TAG | flow_tag);
}
 162 
/* Attach a 24-bit counter to the STE, split across two trigger fields */
void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
        /* This can be used for both rx_steering_mult and for sx_transmit */
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}
 169 
/* Set the TX STE go_back bit (see mlx5dr_ste_set_tx_push_vlan for why) */
void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
{
        MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}
 174 
/* Program a push-VLAN action on a TX STE; @vlan_hdr is the raw VLAN
 * header to insert.
 */
void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
                                 bool go_back)
{
        MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
                 DR_STE_ACTION_TYPE_PUSH_VLAN);
        MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
        /* Due to HW limitation we need to set this bit, otherwise reformat +
         * push vlan will not work.
         */
        if (go_back)
                mlx5dr_ste_set_go_back_bit(hw_ste_p);
}
 187 
/* Program an encap (reformat) action on a TX STE.
 * @reformat_id: HW reformat context to apply
 * @size:        reformat header size in bytes (converted to 2-byte words)
 * @encap_l3:    use L3 encap (replaces L2) instead of plain L2 encap
 */
void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
{
        MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
                 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
        /* The hardware expects here size in words (2 byte) */
        MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
        MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
}
 196 
/* Program an L2 tunnel decap action on an RX STE */
void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
{
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
                 DR_STE_TUNL_ACTION_DECAP);
}
 202 
/* Program a pop-VLAN action on an RX STE */
void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
{
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
                 DR_STE_TUNL_ACTION_POP_VLAN);
}
 208 
/* Program an L3 tunnel decap action on an RX STE.
 * @vlan: the inner packet carries a VLAN header (encoded in
 *        action_description for the HW).
 */
void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
                 DR_STE_TUNL_ACTION_L3_DECAP);
        MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
}
 215 
/* Set the STE entry type (general header field, common to all formats) */
void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
        MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}
 220 
/* Read back the STE entry type set by mlx5dr_ste_set_entry_type() */
u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
{
        return MLX5_GET(ste_general, hw_ste_p, entry_type);
}
 225 
/* Program a header-rewrite (modify packet) action:
 * @num_of_actions: number of rewrite actions at @re_write_index
 * @re_write_index: index of the action list in the rewrite memory
 */
void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
                                    u32 re_write_index)
{
        MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
                 num_of_actions);
        MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
                 re_write_index);
}
 234 
/* Encode the hit-path GVMI in bits 63_48 of the next table base */
void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
        MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}
 239 
/* Initialize a fresh STE: entry type, lookup type (don't-care next lookup)
 * and the GVMI fields shared by the hit and miss paths.
 */
void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
                     u16 gvmi)
{
        MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
        MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
        MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);

        /* Set GVMI once, this is the same for RX/TX
         * bits 63_48 of next table base / miss address encode the next GVMI
         */
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
 254 
/* Make an STE match everything: zero tag and zero mask always hit */
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
        memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
        memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
}
 260 
/* Make an STE match nothing: a non-zero tag byte with a zero mask byte
 * can never compare equal, so lookups always take the miss path.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
        hw_ste->tag[0] = 0xdc;
        hw_ste->mask[0] = 0;
}
 266 
 267 u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
 268 {
 269         u64 index =
 270                 (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
 271                  MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
 272 
 273         return index << 6;
 274 }
 275 
/* Program the hit (next table) address: the 32-byte-aligned ICM address
 * is packed together with the table size into the two base/size fields.
 */
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
        u64 index = (icm_addr >> 5) | ht_size;

        MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
        MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
}
 283 
/* ICM (device) address of @ste, derived from its index in the hosting table */
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
{
        u32 index = ste - ste->htbl->ste_arr;

        return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
}
 290 
/* MR (host memory region) address of @ste, same indexing as the ICM address */
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
{
        u32 index = ste - ste->htbl->ste_arr;

        return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
}
 297 
/* Per-entry collision (miss) list head for @ste's slot in its table */
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
{
        u32 index = ste - ste->htbl->ste_arr;

        return &ste->htbl->miss_list[index];
}
 304 
/* Turn @ste into an always-hit entry whose hit path points at @next_htbl:
 * program the next table's byte mask, lookup type and address, then clear
 * the tag/mask so every packet matches.
 */
static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
                                   struct mlx5dr_ste_htbl *next_htbl)
{
        struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
        u8 *hw_ste = ste->hw_ste;

        MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
        MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
        mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);

        dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
 317 
/* True when @ste_location (1-based chain position) is the final builder
 * of the matcher's STE chain.
 */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
                                u8 ste_location)
{
        return ste_location == nic_matcher->num_of_builders;
}
 323 
/* Replace relevant fields, except of:
 * htbl - keep the origin htbl
 * miss_list + list - already took the src from the list.
 * icm_addr/mr_addr - depends on the hosting table.
 *
 * Before:
 * | a | -> | b | -> | c | ->
 *
 * After:
 * | a | -> | c | ->
 * While the data that was in b copied to a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
        /* Only the reduced STE (no mask area) exists in ste->hw_ste */
        memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
        dst->next_htbl = src->next_htbl;
        if (dst->next_htbl)
                dst->next_htbl->pointing_ste = dst;

        /* dst takes over src's references */
        dst->refcount = src->refcount;

        /* Move all of src's rules onto dst */
        INIT_LIST_HEAD(&dst->rule_list);
        list_splice_tail_init(&src->rule_list, &dst->rule_list);
}
 348 
/* Free ste which is the head and the only one in miss_list */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
                       struct mlx5dr_matcher_rx_tx *nic_matcher,
                       struct mlx5dr_ste_send_info *ste_info_head,
                       struct list_head *send_ste_list,
                       struct mlx5dr_ste_htbl *stats_tbl)
{
        u8 tmp_data_ste[DR_STE_SIZE] = {};
        struct mlx5dr_ste tmp_ste = {};
        u64 miss_addr;

        tmp_ste.hw_ste = tmp_data_ste;

        /* Use temp ste because dr_ste_always_miss_addr
         * touches bit_mask area which doesn't exist at ste->hw_ste.
         */
        memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
        /* Redirect the miss path at the matcher's end anchor */
        miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
        mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
        memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

        list_del_init(&ste->miss_list_node);

        /* Write full STE size in order to have "always_miss" */
        mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
                                                  0, tmp_data_ste,
                                                  ste_info_head,
                                                  send_ste_list,
                                                  true /* Copy data */);

        stats_tbl->ctrl.num_of_valid_entries--;
}
 382 
/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 * next_ste's content is copied over ste (which keeps its table slot),
 * so next_ste's collision table can be released.
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
                        struct mlx5dr_ste_send_info *ste_info_head,
                        struct list_head *send_ste_list,
                        struct mlx5dr_ste_htbl *stats_tbl)

{
        struct mlx5dr_ste_htbl *next_miss_htbl;

        next_miss_htbl = next_ste->htbl;

        /* Remove from the miss_list the next_ste before copy */
        list_del_init(&next_ste->miss_list_node);

        /* All rule-members that use next_ste should know about that */
        mlx5dr_rule_update_rule_member(next_ste, ste);

        /* Move data from next into ste */
        dr_ste_replace(ste, next_ste);

        /* Del the htbl that contains the next_ste.
         * The origin htbl stay with the same number of entries.
         */
        mlx5dr_htbl_put(next_miss_htbl);

        /* Sync the updated head to HW (reduced size - mask is unchanged) */
        mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
                                                  0, ste->hw_ste,
                                                  ste_info_head,
                                                  send_ste_list,
                                                  true /* Copy data */);

        stats_tbl->ctrl.num_of_collisions--;
        stats_tbl->ctrl.num_of_valid_entries--;
}
 420 
/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 * prev_ste's miss address is re-pointed past ste before it is unlinked.
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
                                     struct mlx5dr_ste_send_info *ste_info,
                                     struct list_head *send_ste_list,
                                     struct mlx5dr_ste_htbl *stats_tbl)
{
        struct mlx5dr_ste *prev_ste;
        u64 miss_addr;

        prev_ste = list_prev_entry(ste, miss_list_node);
        if (WARN_ON(!prev_ste))
                return;

        /* Bypass ste: prev now misses to whatever ste missed to */
        miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
        mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);

        mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
                                                  prev_ste->hw_ste, ste_info,
                                                  send_ste_list, true /* Copy data*/);

        list_del_init(&ste->miss_list_node);

        stats_tbl->ctrl.num_of_valid_entries--;
        stats_tbl->ctrl.num_of_collisions--;
}
 448 
/* Release an STE: unlink it from its miss list (head-only, head-with-tail
 * or middle case), push the resulting HW updates, and drop the reference
 * on the hosting table unless the head-replace path already released it.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
                     struct mlx5dr_matcher *matcher,
                     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
        struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
        struct mlx5dr_domain *dmn = matcher->tbl->dmn;
        struct mlx5dr_ste_send_info ste_info_head;
        struct mlx5dr_ste *next_ste, *first_ste;
        bool put_on_origin_table = true;
        struct mlx5dr_ste_htbl *stats_tbl;
        LIST_HEAD(send_ste_list);

        /* Stats are accounted on the table hosting the head of the list */
        first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
                                     struct mlx5dr_ste, miss_list_node);
        stats_tbl = first_ste->htbl;

        /* Two options:
         * 1. ste is head:
         *      a. head ste is the only ste in the miss list
         *      b. head ste is not the only ste in the miss-list
         * 2. ste is not head
         */
        if (first_ste == ste) { /* Ste is the head */
                struct mlx5dr_ste *last_ste;

                last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
                                           struct mlx5dr_ste, miss_list_node);
                if (last_ste == first_ste)
                        next_ste = NULL;
                else
                        next_ste = list_next_entry(ste, miss_list_node);

                if (!next_ste) {
                        /* One and only entry in the list */
                        dr_ste_remove_head_ste(ste, nic_matcher,
                                               &ste_info_head,
                                               &send_ste_list,
                                               stats_tbl);
                } else {
                        /* First but not only entry in the list */
                        dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
                                                &send_ste_list, stats_tbl);
                        /* replace_head already put the collision htbl */
                        put_on_origin_table = false;
                }
        } else { /* Ste in the middle of the list */
                dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
        }

        /* Update HW */
        list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
                                 &send_ste_list, send_list) {
                list_del(&cur_ste_info->send_list);
                mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
                                         cur_ste_info->data, cur_ste_info->size,
                                         cur_ste_info->offset);
        }

        if (put_on_origin_table)
                mlx5dr_htbl_put(ste->htbl);
}
 509 
 510 bool mlx5dr_ste_equal_tag(void *src, void *dst)
 511 {
 512         struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
 513         struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
 514 
 515         return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
 516 }
 517 
/* Point @hw_ste's hit path at @next_htbl (address + size taken from its chunk) */
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
                                          struct mlx5dr_ste_htbl *next_htbl)
{
        struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

        mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
 525 
/* Program the miss address; @miss_addr must be 64-byte aligned since the
 * low 6 bits are dropped. Inverse of mlx5dr_ste_get_miss_addr().
 */
void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
        u64 index = miss_addr >> 6;

        /* Miss address for TX and RX STEs located in the same offsets */
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}
 534 
/* Turn @ste into an always-miss entry pointing at @miss_addr.
 * Note: dr_ste_set_always_miss() writes the mask area, so ste->hw_ste must
 * be a full-sized STE buffer (see dr_ste_remove_head_ste's temp copy).
 */
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
{
        u8 *hw_ste = ste->hw_ste;

        MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
        mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
        dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
 543 
 544 /* The assumption here is that we don't update the ste->hw_ste if it is not
 545  * used ste, so it will be all zero, checking the next_lu_type.
 546  */
 547 bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
 548 {
 549         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;
 550 
 551         if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
 552             MLX5DR_STE_LU_TYPE_NOP)
 553                 return true;
 554 
 555         return false;
 556 }
 557 
/* An STE with no references is free for reuse */
bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
{
        return !ste->refcount;
}
 562 
/* Init one ste as a pattern for ste data array: initialize @formatted_ste
 * and make it either always-hit into connect_info->hit_next_htbl or
 * always-miss to connect_info->miss_icm_addr, per connect_info->type.
 */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
                                  struct mlx5dr_domain_rx_tx *nic_dmn,
                                  struct mlx5dr_ste_htbl *htbl,
                                  u8 *formatted_ste,
                                  struct mlx5dr_htbl_connect_info *connect_info)
{
        /* Stack wrapper only used so the hit/miss helpers can take an ste */
        struct mlx5dr_ste ste = {};

        mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
        ste.hw_ste = formatted_ste;

        if (connect_info->type == CONNECT_HIT)
                dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
        else
                mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
}
 580 
/* Build the pattern STE for @htbl per @connect_info and write the whole
 * formatted table to HW. Returns the postsend result (0 on success).
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
                                      struct mlx5dr_domain_rx_tx *nic_dmn,
                                      struct mlx5dr_ste_htbl *htbl,
                                      struct mlx5dr_htbl_connect_info *connect_info,
                                      bool update_hw_ste)
{
        u8 formatted_ste[DR_STE_SIZE] = {};

        mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
                                     nic_dmn,
                                     htbl,
                                     formatted_ste,
                                     connect_info);

        return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}
 597 
/* Allocate and connect the next hash table for @ste, unless @ste is the
 * last in the rule's chain (then nothing is needed). The new table's
 * lookup type and byte mask are taken from cur_hw_ste's next-lookup
 * fields; its miss path is wired to the matcher's end anchor.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOENT if the
 * table could not be written to HW.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
                                struct mlx5dr_matcher_rx_tx *nic_matcher,
                                struct mlx5dr_ste *ste,
                                u8 *cur_hw_ste,
                                enum mlx5dr_icm_chunk_size log_table_size)
{
        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
        struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
        struct mlx5dr_domain *dmn = matcher->tbl->dmn;
        struct mlx5dr_htbl_connect_info info;
        struct mlx5dr_ste_htbl *next_htbl;

        if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
                u8 next_lu_type;
                u16 byte_mask;

                next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
                byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);

                next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
                                                  log_table_size,
                                                  next_lu_type,
                                                  byte_mask);
                if (!next_htbl) {
                        mlx5dr_dbg(dmn, "Failed allocating table\n");
                        return -ENOMEM;
                }

                /* Write new table to HW */
                info.type = CONNECT_MISS;
                info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
                if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
                                                      &info, false)) {
                        mlx5dr_info(dmn, "Failed writing table to HW\n");
                        goto free_table;
                }

                /* Link both directions: ste hits into next_htbl */
                mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
                ste->next_htbl = next_htbl;
                next_htbl->pointing_ste = ste;
        }

        return 0;

free_table:
        mlx5dr_ste_htbl_free(next_htbl);
        return -ENOENT;
}
 646 
 647 static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
 648 {
 649         struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
 650         int num_of_entries;
 651 
 652         htbl->ctrl.may_grow = true;
 653 
 654         if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
 655                 htbl->ctrl.may_grow = false;
 656 
 657         /* Threshold is 50%, one is added to table of size 1 */
 658         num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
 659         ctrl->increase_threshold = (num_of_entries + 1) / 2;
 660 }
 661 
/* Allocate an STE hash table backed by an ICM chunk of @chunk_size.
 * Each entry's SW shadow (hw_ste) and miss-list head are initialized.
 * Returns NULL on allocation failure; free with mlx5dr_ste_htbl_free().
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
                                              enum mlx5dr_icm_chunk_size chunk_size,
                                              u8 lu_type, u16 byte_mask)
{
        struct mlx5dr_icm_chunk *chunk;
        struct mlx5dr_ste_htbl *htbl;
        int i;

        htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
        if (!htbl)
                return NULL;

        chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
        if (!chunk)
                goto out_free_htbl;

        htbl->chunk = chunk;
        htbl->lu_type = lu_type;
        htbl->byte_mask = byte_mask;
        htbl->ste_arr = chunk->ste_arr;
        htbl->hw_ste_arr = chunk->hw_ste_arr;
        htbl->miss_list = chunk->miss_list;
        htbl->refcount = 0;

        for (i = 0; i < chunk->num_of_entries; i++) {
                struct mlx5dr_ste *ste = &htbl->ste_arr[i];

                /* SW shadow holds only the reduced STE (no mask area) */
                ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
                ste->htbl = htbl;
                ste->refcount = 0;
                INIT_LIST_HEAD(&ste->miss_list_node);
                INIT_LIST_HEAD(&htbl->miss_list[i]);
                INIT_LIST_HEAD(&ste->rule_list);
        }

        htbl->chunk_size = chunk_size;
        dr_ste_set_ctrl(htbl);
        return htbl;

out_free_htbl:
        kfree(htbl);
        return NULL;
}
 705 
 706 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
 707 {
 708         if (htbl->refcount)
 709                 return -EBUSY;
 710 
 711         mlx5dr_icm_free_chunk(htbl->chunk);
 712         kfree(htbl);
 713         return 0;
 714 }
 715 
 716 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
 717                                u8 match_criteria,
 718                                struct mlx5dr_match_param *mask,
 719                                struct mlx5dr_match_param *value)
 720 {
 721         if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
 722                 if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
 723                         mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
 724                         return -EINVAL;
 725                 }
 726         }
 727 
 728         return 0;
 729 }
 730 
 731 int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
 732                              struct mlx5dr_matcher_rx_tx *nic_matcher,
 733                              struct mlx5dr_match_param *value,
 734                              u8 *ste_arr)
 735 {
 736         struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
 737         struct mlx5dr_domain *dmn = matcher->tbl->dmn;
 738         struct mlx5dr_ste_build *sb;
 739         int ret, i;
 740 
 741         ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
 742                                          &matcher->mask, value);
 743         if (ret)
 744                 return ret;
 745 
 746         sb = nic_matcher->ste_builder;
 747         for (i = 0; i < nic_matcher->num_of_builders; i++) {
 748                 mlx5dr_ste_init(ste_arr,
 749                                 sb->lu_type,
 750                                 nic_dmn->ste_type,
 751                                 dmn->info.caps.gvmi);
 752 
 753                 mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
 754 
 755                 ret = sb->ste_build_tag_func(value, sb, ste_arr);
 756                 if (ret)
 757                         return ret;
 758 
 759                 /* Connect the STEs */
 760                 if (i < (nic_matcher->num_of_builders - 1)) {
 761                         /* Need the next builder for these fields,
 762                          * not relevant for the last ste in the chain.
 763                          */
 764                         sb++;
 765                         MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
 766                         MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
 767                 }
 768                 ste_arr += DR_STE_SIZE;
 769         }
 770         return 0;
 771 }
 772 
/* Translate the SW steering L2 src/dst match mask into the STE bit mask.
 * This function CONSUMES the mask: fields it handles are zeroed in @value
 * so that later builders do not match on them again (the explicit zeroing
 * below; the DR_STE_SET_MASK* macros presumably clear their source field
 * too - confirm against their definition in the dr_ste headers).
 * Returns 0 on success, -EINVAL for an unsupportable vlan mask combo.
 */
static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
                                                bool inner, u8 *bit_mask)
{
        struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

        DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
        DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

        /* The STE splits the 48-bit smac as 47:32 / 31:0, while the SW
         * format splits it as 47:16 / 15:0 - repack accordingly.
         */
        if (mask->smac_47_16 || mask->smac_15_0) {
                MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
                         mask->smac_47_16 >> 16);
                MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
                         mask->smac_47_16 << 16 | mask->smac_15_0);
                mask->smac_47_16 = 0;
                mask->smac_15_0 = 0;
        }

        DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
        DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
        DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
        DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);

        /* Only one vlan qualifier can be matched; consume whichever is set */
        if (mask->cvlan_tag) {
                MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
                mask->cvlan_tag = 0;
        } else if (mask->svlan_tag) {
                MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
                mask->svlan_tag = 0;
        }

        /* If BOTH tags were requested, only one was consumed above and the
         * other is still set here - reject the combination.
         */
        if (mask->cvlan_tag || mask->svlan_tag) {
                pr_info("Invalid c/svlan mask configuration\n");
                return -EINVAL;
        }

        return 0;
}
 810 
/* Copy the misc match-set fields from the device layout (fte_match_set_misc,
 * read field-by-field with MLX5_GET) into the SW steering representation.
 * Pure field extraction - no validation or transformation.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
        spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
        spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
        spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
        spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
        spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);

        spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
        spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
                                                      source_eswitch_owner_vhca_id);

        spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
        spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
        spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
        spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
        spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
        spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);

        spec->outer_second_cvlan_tag =
                MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
        spec->inner_second_cvlan_tag =
                MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
        spec->outer_second_svlan_tag =
                MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
        spec->inner_second_svlan_tag =
                MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);

        spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);

        /* GRE key is read through the NVGRE hi/lo overlay of the union */
        spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
        spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);

        spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);

        spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
        spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);

        spec->outer_ipv6_flow_label =
                MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);

        spec->inner_ipv6_flow_label =
                MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);

        spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
        spec->geneve_protocol_type =
                MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);

        spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
}
 861 
/* Copy the L2-L4 match-set fields from the device layout
 * (fte_match_set_lyr_2_4) into the SW steering representation.
 * The 128-bit IPv6 addresses are copied out as four big-endian words
 * and converted to host order, most significant word first.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
{
        u32 raw_ip[4];

        spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);

        spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
        spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);

        spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);

        spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
        spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
        spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
        spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);

        spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
        spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
        spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
        spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
        spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
        spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
        spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
        spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
        spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
        spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);

        spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);

        spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
        spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);

        /* Source address: raw_ip[0] holds bits 127:96, raw_ip[3] bits 31:0 */
        memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                                    sizeof(raw_ip));

        spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
        spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
        spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
        spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

        /* Destination address, same word ordering as the source above */
        memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                                    sizeof(raw_ip));

        spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
        spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
        spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
        spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
 912 
/* Copy the misc2 match-set fields (MPLS headers and metadata registers)
 * from the device layout (fte_match_set_misc2) into the SW steering
 * representation. Pure field extraction - no validation.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
{
        spec->outer_first_mpls_label =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
        spec->outer_first_mpls_exp =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
        spec->outer_first_mpls_s_bos =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
        spec->outer_first_mpls_ttl =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
        spec->inner_first_mpls_label =
                MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
        spec->inner_first_mpls_exp =
                MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
        spec->inner_first_mpls_s_bos =
                MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
        spec->inner_first_mpls_ttl =
                MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
        spec->outer_first_mpls_over_gre_label =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
        spec->outer_first_mpls_over_gre_exp =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
        spec->outer_first_mpls_over_gre_s_bos =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
        spec->outer_first_mpls_over_gre_ttl =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
        spec->outer_first_mpls_over_udp_label =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
        spec->outer_first_mpls_over_udp_exp =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
        spec->outer_first_mpls_over_udp_s_bos =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
        spec->outer_first_mpls_over_udp_ttl =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
        spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
        spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
        spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
        spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
        spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
        spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
        spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
        spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
        spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
        spec->metadata_reg_b = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_b);
}
 958 
/* Copy the misc3 match-set fields (TCP seq/ack, VXLAN-GPE, ICMP) from the
 * device layout (fte_match_set_misc3) into the SW steering representation.
 * Note the device layout names the IPv4 ICMP fields without a version
 * suffix (icmp_*), while the SW struct uses icmpv4_*.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
{
        spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
        spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
        spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
        spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
        spec->outer_vxlan_gpe_vni =
                MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
        spec->outer_vxlan_gpe_next_protocol =
                MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
        spec->outer_vxlan_gpe_flags =
                MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
        spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
        spec->icmpv6_header_data =
                MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
        spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
        spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
        spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
        spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
}
 979 
/* Unpack a caller-supplied match buffer into the per-criteria members of
 * set_param. The buffer is laid out as consecutive match sets in a fixed
 * order (outer spec, misc, inner spec, misc2, misc3); param_location is
 * advanced unconditionally, regardless of which criteria bits are set,
 * because the layout offsets are fixed.
 *
 * A possibly-truncated buffer is handled by copying the available tail
 * bytes into a zeroed bounce buffer so the dr_ste_copy_mask_* helpers can
 * always read a full-size set.
 * NOTE(review): tail_param is sized for fte_match_set_lyr_2_4; this assumes
 * the misc/misc2/misc3 device sets are no larger - confirm in mlx5_ifc.h.
 */
void mlx5dr_ste_copy_param(u8 match_criteria,
                           struct mlx5dr_match_param *set_param,
                           struct mlx5dr_match_parameters *mask)
{
        u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
        u8 *data = (u8 *)mask->match_buf;
        size_t param_location;
        void *buff;

        if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
                if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
                        memcpy(tail_param, data, mask->match_sz);
                        buff = tail_param;
                } else {
                        buff = mask->match_buf;
                }
                dr_ste_copy_mask_spec(buff, &set_param->outer);
        }
        param_location = sizeof(struct mlx5dr_match_spec);

        if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
                if (mask->match_sz < param_location +
                    sizeof(struct mlx5dr_match_misc)) {
                        memcpy(tail_param, data + param_location,
                               mask->match_sz - param_location);
                        buff = tail_param;
                } else {
                        buff = data + param_location;
                }
                dr_ste_copy_mask_misc(buff, &set_param->misc);
        }
        param_location += sizeof(struct mlx5dr_match_misc);

        if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
                if (mask->match_sz < param_location +
                    sizeof(struct mlx5dr_match_spec)) {
                        memcpy(tail_param, data + param_location,
                               mask->match_sz - param_location);
                        buff = tail_param;
                } else {
                        buff = data + param_location;
                }
                dr_ste_copy_mask_spec(buff, &set_param->inner);
        }
        param_location += sizeof(struct mlx5dr_match_spec);

        if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
                if (mask->match_sz < param_location +
                    sizeof(struct mlx5dr_match_misc2)) {
                        memcpy(tail_param, data + param_location,
                               mask->match_sz - param_location);
                        buff = tail_param;
                } else {
                        buff = data + param_location;
                }
                dr_ste_copy_mask_misc2(buff, &set_param->misc2);
        }

        param_location += sizeof(struct mlx5dr_match_misc2);

        if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
                if (mask->match_sz < param_location +
                    sizeof(struct mlx5dr_match_misc3)) {
                        memcpy(tail_param, data + param_location,
                               mask->match_sz - param_location);
                        buff = tail_param;
                } else {
                        buff = data + param_location;
                }
                dr_ste_copy_mask_misc3(buff, &set_param->misc3);
        }
}
1052 
/* Write the ETHL2 SRC-DST STE tag from the rule's match values.
 * Like its bit-mask counterpart, this CONSUMES the spec: handled fields
 * are zeroed so later STE builders do not match them again.
 * Returns 0 on success, -EINVAL on an ip_version the STE cannot encode.
 */
static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
                                           struct mlx5dr_ste_build *sb,
                                           u8 *hw_ste_p)
{
        struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
        u8 *tag = hw_ste->tag;

        DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
        DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);

        /* Repack smac from the SW 47:16/15:0 split to the STE 47:32/31:0 */
        if (spec->smac_47_16 || spec->smac_15_0) {
                MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
                         spec->smac_47_16 >> 16);
                MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
                         spec->smac_47_16 << 16 | spec->smac_15_0);
                spec->smac_47_16 = 0;
                spec->smac_15_0 = 0;
        }

        if (spec->ip_version) {
                if (spec->ip_version == IP_VERSION_IPV4) {
                        MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
                        spec->ip_version = 0;
                } else if (spec->ip_version == IP_VERSION_IPV6) {
                        MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
                        spec->ip_version = 0;
                } else {
                        pr_info("Unsupported ip_version value\n");
                        return -EINVAL;
                }
        }

        DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
        DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
        DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);

        /* Only one vlan qualifier is encoded; cvlan takes precedence */
        if (spec->cvlan_tag) {
                MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
                spec->cvlan_tag = 0;
        } else if (spec->svlan_tag) {
                MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
                spec->svlan_tag = 0;
        }
        return 0;
}
1099 
1100 int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
1101                                     struct mlx5dr_match_param *mask,
1102                                     bool inner, bool rx)
1103 {
1104         int ret;
1105 
1106         ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
1107         if (ret)
1108                 return ret;
1109 
1110         sb->rx = rx;
1111         sb->inner = inner;
1112         sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
1113         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1114         sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
1115 
1116         return 0;
1117 }
1118 
/* Build the STE bit mask for matching the 128-bit IPv6 destination
 * address (inner or outer header), one 32-bit word at a time.
 */
static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
                                                  bool inner, u8 *bit_mask)
{
        struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

        DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
        DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
        DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
        DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
}
1129 
/* Write the STE tag with the rule's IPv6 destination address words.
 * Always succeeds (returns 0); the int return matches the
 * ste_build_tag_func callback signature.
 */
static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
                                            struct mlx5dr_ste_build *sb,
                                            u8 *hw_ste_p)
{
        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
        struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
        u8 *tag = hw_ste->tag;

        DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
        DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
        DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
        DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

        return 0;
}
1145 
1146 void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
1147                                       struct mlx5dr_match_param *mask,
1148                                       bool inner, bool rx)
1149 {
1150         dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
1151 
1152         sb->rx = rx;
1153         sb->inner = inner;
1154         sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
1155         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1156         sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
1157 }
1158 
/* Build the STE bit mask for matching the 128-bit IPv6 source address
 * (inner or outer header), one 32-bit word at a time.
 */
static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
                                                  bool inner, u8 *bit_mask)
{
        struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

        DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
        DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
        DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
        DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
}
1169 
/* Write the STE tag with the rule's IPv6 source address words.
 * Always succeeds (returns 0); the int return matches the
 * ste_build_tag_func callback signature.
 */
static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
                                            struct mlx5dr_ste_build *sb,
                                            u8 *hw_ste_p)
{
        struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
        u8 *tag = hw_ste->tag;

        DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
        DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
        DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
        DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

        return 0;
}
1185 
1186 void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
1187                                       struct mlx5dr_match_param *mask,
1188                                       bool inner, bool rx)
1189 {
1190         dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
1191 
1192         sb->rx = rx;
1193         sb->inner = inner;
1194         sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
1195         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1196         sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
1197 }
1198 
/* Build the STE bit mask for the IPv4 5-tuple lookup (addresses, ports,
 * protocol, plus frag/dscp/ecn and tcp flags). TCP and UDP port masks
 * map onto the same STE port fields; presumably the DR_STE_SET_MASK_V
 * macro only writes when the source mask field is non-zero, so the two
 * cannot clobber each other - confirm against the macro definition.
 * Consumes the tcp_flags mask field.
 */
static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
                                                      bool inner,
                                                      u8 *bit_mask)
{
        struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          destination_address, mask, dst_ip_31_0);
        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          source_address, mask, src_ip_31_0);
        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          destination_port, mask, tcp_dport);
        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          destination_port, mask, udp_dport);
        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          source_port, mask, tcp_sport);
        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          source_port, mask, udp_sport);
        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          protocol, mask, ip_protocol);
        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          fragmented, mask, frag);
        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          dscp, mask, ip_dscp);
        DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
                          ecn, mask, ip_ecn);

        /* TCP flags are packed by a dedicated helper macro; consume them */
        if (mask->tcp_flags) {
                DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
                mask->tcp_flags = 0;
        }
}
1231 
/* Write the IPv4 5-tuple STE tag from the rule's match values. Consumes
 * the tcp_flags spec field. TCP and UDP ports target the same STE fields
 * (see the bit-mask builder). Always returns 0; the int return matches
 * the ste_build_tag_func callback signature.
 */
static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
                                                struct mlx5dr_ste_build *sb,
                                                u8 *hw_ste_p)
{
        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
        struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
        u8 *tag = hw_ste->tag;

        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
        DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);

        if (spec->tcp_flags) {
                DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
                spec->tcp_flags = 0;
        }

        return 0;
}
1258 
1259 void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
1260                                           struct mlx5dr_match_param *mask,
1261                                           bool inner, bool rx)
1262 {
1263         dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
1264 
1265         sb->rx = rx;
1266         sb->inner = inner;
1267         sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
1268         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1269         sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
1270 }
1271 
/* Build the common STE bit mask shared by the ETHL2 SRC and ETHL2 DST
 * lookups: first vlan, fragmentation, ethertype/l3 type, and the second
 * (QinQ) vlan taken from the misc set for the chosen inner/outer header.
 * CONSUMES the handled vlan-qualifier mask fields from @value.
 */
static void
dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
                                        bool inner, u8 *bit_mask)
{
        struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
        struct mlx5dr_match_misc *misc_mask = &value->misc;

        DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
        DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
        DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
        DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
        DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
        DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);

        /* One STE field covers both c/svlan qualifiers; consume both */
        if (mask->svlan_tag || mask->cvlan_tag) {
                MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
                mask->cvlan_tag = 0;
                mask->svlan_tag = 0;
        }

        /* Second vlan fields live in the misc set, named per header side */
        if (inner) {
                if (misc_mask->inner_second_cvlan_tag ||
                    misc_mask->inner_second_svlan_tag) {
                        MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
                        misc_mask->inner_second_cvlan_tag = 0;
                        misc_mask->inner_second_svlan_tag = 0;
                }

                DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
                                  second_vlan_id, misc_mask, inner_second_vid);
                DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
                                  second_cfi, misc_mask, inner_second_cfi);
                DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
                                  second_priority, misc_mask, inner_second_prio);
        } else {
                if (misc_mask->outer_second_cvlan_tag ||
                    misc_mask->outer_second_svlan_tag) {
                        MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
                        misc_mask->outer_second_cvlan_tag = 0;
                        misc_mask->outer_second_svlan_tag = 0;
                }

                DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
                                  second_vlan_id, misc_mask, outer_second_vid);
                DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
                                  second_cfi, misc_mask, outer_second_cfi);
                DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
                                  second_priority, misc_mask, outer_second_prio);
        }
}
1322 
/* Build the lookup tag for the L2 fields shared by the ETHL2_SRC and
 * ETHL2_DST STEs. Mirrors dr_ste_build_eth_l2_src_or_dst_bit_mask().
 * Consumed spec/misc fields are zeroed in @value after being encoded.
 * Returns 0 on success, -EINVAL for an unsupported ip_version value.
 */
static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
					      bool inner, u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);

	/* Translate the fw match ip_version into the STE l3_type encoding. */
	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			pr_info("Unsupported ip_version value\n");
			return -EINVAL;
		}
	}

	/* cvlan and svlan are mutually exclusive in the first VLAN header. */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Second VLAN header: inner or outer misc fields depending on which
	 * header this builder matches on.
	 */
	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1385 
/* Build the ETHL2_SRC bit mask: source MAC plus the shared L2 fields. */
static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);

	/* VLAN / ethertype / ip_version fields are common with ETHL2_DST */
	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1396 
/* Build the ETHL2_SRC lookup tag: source MAC plus the shared L2 fields.
 * Returns 0 on success or the error from the shared tag builder.
 */
static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
}
1410 
1411 void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
1412                                  struct mlx5dr_match_param *mask,
1413                                  bool inner, bool rx)
1414 {
1415         dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
1416         sb->rx = rx;
1417         sb->inner = inner;
1418         sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
1419         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1420         sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
1421 }
1422 
/* Build the ETHL2_DST bit mask: destination MAC plus the shared L2 fields. */
static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	/* VLAN / ethertype / ip_version fields are common with ETHL2_SRC */
	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1433 
/* Build the ETHL2_DST lookup tag: destination MAC plus the shared L2 fields.
 * Returns 0 on success or the error from the shared tag builder.
 */
static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
}
1447 
1448 void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
1449                                  struct mlx5dr_match_param *mask,
1450                                  bool inner, bool rx)
1451 {
1452         dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
1453 
1454         sb->rx = rx;
1455         sb->inner = inner;
1456         sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
1457         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1458         sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
1459 }
1460 
/* Build the ETHL2_TUNNELING bit mask: destination MAC, first VLAN header,
 * ethertype/ip_version and the VXLAN VNI from the misc parameters.
 * Consumed mask fields are zeroed in @value after being encoded.
 */
static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		/* VNI mask is shifted into the upper 24 bits of the field,
		 * matching the shift applied in the tag builder.
		 */
		MLX5_SET(ste_eth_l2_tnl, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1488 
1489 static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
1490                                        struct mlx5dr_ste_build *sb,
1491                                        u8 *hw_ste_p)
1492 {
1493         struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1494         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1495         struct mlx5dr_match_misc *misc = &value->misc;
1496         u8 *tag = hw_ste->tag;
1497 
1498         DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
1499         DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
1500         DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
1501         DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
1502         DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
1503         DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
1504         DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
1505 
1506         if (misc->vxlan_vni) {
1507                 MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
1508                          (misc->vxlan_vni << 8));
1509                 misc->vxlan_vni = 0;
1510         }
1511 
1512         if (spec->cvlan_tag) {
1513                 MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
1514                 spec->cvlan_tag = 0;
1515         } else if (spec->svlan_tag) {
1516                 MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
1517                 spec->svlan_tag = 0;
1518         }
1519 
1520         if (spec->ip_version) {
1521                 if (spec->ip_version == IP_VERSION_IPV4) {
1522                         MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
1523                         spec->ip_version = 0;
1524                 } else if (spec->ip_version == IP_VERSION_IPV6) {
1525                         MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
1526                         spec->ip_version = 0;
1527                 } else {
1528                         return -EINVAL;
1529                 }
1530         }
1531 
1532         return 0;
1533 }
1534 
/* Set up an ETHL2_TUNNELING STE builder (DMAC + first VLAN + VXLAN VNI). */
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	/* NOTE(review): unlike the other builders here, the lookup type is a
	 * fixed constant and does not depend on rx/inner — presumably only
	 * one L2-tunneling lookup type exists; confirm against the STE spec.
	 */
	sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
}
1546 
/* Build the ETHL3_IPV4_MISC bit mask; only the TTL field is matched here. */
static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
						   bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
}
1554 
/* Build the ETHL3_IPV4_MISC lookup tag (TTL only). Always returns 0. */
static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);

	return 0;
}
1567 
1568 void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
1569                                        struct mlx5dr_match_param *mask,
1570                                        bool inner, bool rx)
1571 {
1572         dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
1573 
1574         sb->rx = rx;
1575         sb->inner = inner;
1576         sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
1577         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1578         sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
1579 }
1580 
/* Build the ETHL4 bit mask: L4 ports (TCP or UDP share the same STE fields),
 * protocol, fragmentation, DSCP/ECN, hop limit and TCP flags.
 * Consumed tcp_flags are zeroed in the mask after being encoded.
 */
static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	/* TCP and UDP ports map to the same dst_port/src_port STE fields */
	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);

	if (mask->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
		mask->tcp_flags = 0;
	}
}
1601 
/* Build the ETHL4 lookup tag; mirrors dr_ste_build_ipv6_l3_l4_bit_mask().
 * Consumed tcp_flags are zeroed in the spec. Always returns 0.
 */
static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *hw_ste_p)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	u8 *tag = hw_ste->tag;

	/* TCP and UDP ports map to the same dst_port/src_port STE fields */
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1627 
1628 void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
1629                                  struct mlx5dr_match_param *mask,
1630                                  bool inner, bool rx)
1631 {
1632         dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
1633 
1634         sb->rx = rx;
1635         sb->inner = inner;
1636         sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
1637         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1638         sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
1639 }
1640 
/* Tag builder for the "always hit" STE: nothing to match, so no-op. */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *hw_ste_p)
{
	return 0;
}
1647 
1648 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
1649 {
1650         sb->rx = rx;
1651         sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
1652         sb->byte_mask = 0;
1653         sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
1654 }
1655 
/* Build the first-MPLS bit mask from the inner or outer misc2 fields. */
static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
				       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;

	if (inner)
		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
	else
		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
}
1666 
/* Build the first-MPLS lookup tag from the inner or outer misc2 fields.
 * Always returns 0.
 */
static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
				 struct mlx5dr_ste_build *sb,
				 u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
	u8 *tag = hw_ste->tag;

	if (sb->inner)
		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
	else
		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);

	return 0;
}
1682 
1683 void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
1684                            struct mlx5dr_match_param *mask,
1685                            bool inner, bool rx)
1686 {
1687         dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
1688 
1689         sb->rx = rx;
1690         sb->inner = inner;
1691         sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
1692         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1693         sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
1694 }
1695 
/* Build the GRE bit mask: protocol, key halves and the C/K/S flag bits. */
static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
				      bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);

	DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
}
1709 
1710 static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
1711                                 struct mlx5dr_ste_build *sb,
1712                                 u8 *hw_ste_p)
1713 {
1714         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
1715         struct  mlx5dr_match_misc *misc = &value->misc;
1716         u8 *tag = hw_ste->tag;
1717 
1718         DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
1719 
1720         DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
1721         DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
1722         DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
1723 
1724         DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
1725 
1726         DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
1727 
1728         return 0;
1729 }
1730 
1731 void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
1732                           struct mlx5dr_match_param *mask, bool inner, bool rx)
1733 {
1734         dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
1735 
1736         sb->rx = rx;
1737         sb->inner = inner;
1738         sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
1739         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1740         sb->ste_build_tag_func = &dr_ste_build_gre_tag;
1741 }
1742 
/* Build the FLEX_PARSER_0 bit mask for the outer first MPLS header.
 * The same parser_3 fields encode either MPLS-over-GRE or MPLS-over-UDP,
 * selected by which misc2 fields are set in the mask.
 */
static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
				  misc_2_mask, outer_first_mpls_over_gre_label);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
				  misc_2_mask, outer_first_mpls_over_gre_exp);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
				  misc_2_mask, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
				  misc_2_mask, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
				  misc_2_mask, outer_first_mpls_over_udp_label);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
				  misc_2_mask, outer_first_mpls_over_udp_exp);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
				  misc_2_mask, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
				  misc_2_mask, outer_first_mpls_over_udp_ttl);
	}
}
1774 
/* Build the FLEX_PARSER_0 lookup tag for the outer first MPLS header;
 * mirrors dr_ste_build_flex_parser_0_bit_mask(). Always returns 0.
 */
static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
	u8 *tag = hw_ste->tag;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
			       misc_2_mask, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
			       misc_2_mask, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
			       misc_2_mask, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
			       misc_2_mask, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
			       misc_2_mask, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
			       misc_2_mask, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
			       misc_2_mask, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
			       misc_2_mask, outer_first_mpls_over_udp_ttl);
	}
	return 0;
}
1810 
1811 void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
1812                                     struct mlx5dr_match_param *mask,
1813                                     bool inner, bool rx)
1814 {
1815         dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
1816 
1817         sb->rx = rx;
1818         sb->inner = inner;
1819         sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
1820         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1821         sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
1822 }
1823 
1824 #define ICMP_TYPE_OFFSET_FIRST_DW               24
1825 #define ICMP_CODE_OFFSET_FIRST_DW               16
1826 #define ICMP_HEADER_DATA_OFFSET_SECOND_DW       0
1827 
/* Build the FLEX_PARSER_1 bit mask for ICMPv4/ICMPv6 matching.
 * The ICMP type and code share the first flex-parser dword (dw0) and the
 * header data uses the second (dw1); which flex parser register holds each
 * dword comes from the device caps. Only dw0==4 / dw1==5 are supported —
 * any other parser assignment fails with -EINVAL.
 * Consumed mask fields are zeroed in @mask after being encoded.
 */
static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
					       struct mlx5dr_cmd_caps *caps,
					       u8 *bit_mask)
{
	struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
	bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
	u32 icmp_header_data_mask;
	u32 icmp_type_mask;
	u32 icmp_code_mask;
	int dw0_location;
	int dw1_location;

	/* Pick the v4 or v6 fields and parser locations from the caps */
	if (is_ipv4_mask) {
		icmp_header_data_mask	= misc_3_mask->icmpv4_header_data;
		icmp_type_mask		= misc_3_mask->icmpv4_type;
		icmp_code_mask		= misc_3_mask->icmpv4_code;
		dw0_location		= caps->flex_parser_id_icmp_dw0;
		dw1_location		= caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data_mask	= misc_3_mask->icmpv6_header_data;
		icmp_type_mask		= misc_3_mask->icmpv6_type;
		icmp_code_mask		= misc_3_mask->icmpv6_code;
		dw0_location		= caps->flex_parser_id_icmpv6_dw0;
		dw1_location		= caps->flex_parser_id_icmpv6_dw1;
	}

	switch (dw0_location) {
	case 4:
		if (icmp_type_mask) {
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
				 (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_type = 0;
			else
				misc_3_mask->icmpv6_type = 0;
		}
		if (icmp_code_mask) {
			/* Type and code share flex_parser_4; OR in the code
			 * bits without clobbering the type bits set above.
			 */
			u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
					       flex_parser_4);
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
				 cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_code = 0;
			else
				misc_3_mask->icmpv6_code = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	switch (dw1_location) {
	case 5:
		if (icmp_header_data_mask) {
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
				 (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_header_data = 0;
			else
				misc_3_mask->icmpv6_header_data = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1896 
/* Build the FLEX_PARSER_1 lookup tag for ICMPv4/ICMPv6; mirrors
 * dr_ste_build_flex_parser_1_bit_mask(), including the dw0==4 / dw1==5
 * restriction. Consumed fields are zeroed in @value after being encoded.
 * Returns 0 on success, -EINVAL for an unsupported parser location.
 */
static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
	u8 *tag = hw_ste->tag;
	u32 icmp_header_data;
	int dw0_location;
	int dw1_location;
	u32 icmp_type;
	u32 icmp_code;
	bool is_ipv4;

	/* Pick the v4 or v6 fields and parser locations from the caps */
	is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
	if (is_ipv4) {
		icmp_header_data	= misc_3->icmpv4_header_data;
		icmp_type		= misc_3->icmpv4_type;
		icmp_code		= misc_3->icmpv4_code;
		dw0_location		= sb->caps->flex_parser_id_icmp_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data	= misc_3->icmpv6_header_data;
		icmp_type		= misc_3->icmpv6_type;
		icmp_code		= misc_3->icmpv6_code;
		dw0_location		= sb->caps->flex_parser_id_icmpv6_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmpv6_dw1;
	}

	switch (dw0_location) {
	case 4:
		if (icmp_type) {
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
				 (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
			if (is_ipv4)
				misc_3->icmpv4_type = 0;
			else
				misc_3->icmpv6_type = 0;
		}

		if (icmp_code) {
			/* Type and code share flex_parser_4; OR in the code
			 * bits without clobbering the type bits set above.
			 */
			u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
					       flex_parser_4);
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
				 cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
			if (is_ipv4)
				misc_3->icmpv4_code = 0;
			else
				misc_3->icmpv6_code = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	switch (dw1_location) {
	case 5:
		if (icmp_header_data) {
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
				 (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
			if (is_ipv4)
				misc_3->icmpv4_header_data = 0;
			else
				misc_3->icmpv6_header_data = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1969 
1970 int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
1971                                    struct mlx5dr_match_param *mask,
1972                                    struct mlx5dr_cmd_caps *caps,
1973                                    bool inner, bool rx)
1974 {
1975         int ret;
1976 
1977         ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
1978         if (ret)
1979                 return ret;
1980 
1981         sb->rx = rx;
1982         sb->inner = inner;
1983         sb->caps = caps;
1984         sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
1985         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1986         sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
1987 
1988         return 0;
1989 }
1990 
1991 static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
1992                                                   bool inner, u8 *bit_mask)
1993 {
1994         struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
1995 
1996         DR_STE_SET_MASK_V(general_purpose, bit_mask,
1997                           general_purpose_lookup_field, misc_2_mask,
1998                           metadata_reg_a);
1999 }
2000 
2001 static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
2002                                             struct mlx5dr_ste_build *sb,
2003                                             u8 *hw_ste_p)
2004 {
2005         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2006         struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2007         u8 *tag = hw_ste->tag;
2008 
2009         DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
2010                        misc_2_mask, metadata_reg_a);
2011 
2012         return 0;
2013 }
2014 
2015 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
2016                                       struct mlx5dr_match_param *mask,
2017                                       bool inner, bool rx)
2018 {
2019         dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
2020 
2021         sb->rx = rx;
2022         sb->inner = inner;
2023         sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
2024         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2025         sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
2026 }
2027 
2028 static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
2029                                               bool inner, u8 *bit_mask)
2030 {
2031         struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
2032 
2033         if (inner) {
2034                 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
2035                                   inner_tcp_seq_num);
2036                 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
2037                                   inner_tcp_ack_num);
2038         } else {
2039                 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
2040                                   outer_tcp_seq_num);
2041                 DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
2042                                   outer_tcp_ack_num);
2043         }
2044 }
2045 
2046 static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
2047                                         struct mlx5dr_ste_build *sb,
2048                                         u8 *hw_ste_p)
2049 {
2050         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2051         struct mlx5dr_match_misc3 *misc3 = &value->misc3;
2052         u8 *tag = hw_ste->tag;
2053 
2054         if (sb->inner) {
2055                 DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
2056                 DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
2057         } else {
2058                 DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
2059                 DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
2060         }
2061 
2062         return 0;
2063 }
2064 
2065 void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
2066                                   struct mlx5dr_match_param *mask,
2067                                   bool inner, bool rx)
2068 {
2069         dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
2070 
2071         sb->rx = rx;
2072         sb->inner = inner;
2073         sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
2074         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2075         sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
2076 }
2077 
2078 static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value,
2079                                                   bool inner, u8 *bit_mask)
2080 {
2081         struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
2082 
2083         if (misc_3_mask->outer_vxlan_gpe_flags ||
2084             misc_3_mask->outer_vxlan_gpe_next_protocol) {
2085                 MLX5_SET(ste_flex_parser_tnl, bit_mask,
2086                          flex_parser_tunneling_header_63_32,
2087                          (misc_3_mask->outer_vxlan_gpe_flags << 24) |
2088                          (misc_3_mask->outer_vxlan_gpe_next_protocol));
2089                 misc_3_mask->outer_vxlan_gpe_flags = 0;
2090                 misc_3_mask->outer_vxlan_gpe_next_protocol = 0;
2091         }
2092 
2093         if (misc_3_mask->outer_vxlan_gpe_vni) {
2094                 MLX5_SET(ste_flex_parser_tnl, bit_mask,
2095                          flex_parser_tunneling_header_31_0,
2096                          misc_3_mask->outer_vxlan_gpe_vni << 8);
2097                 misc_3_mask->outer_vxlan_gpe_vni = 0;
2098         }
2099 }
2100 
2101 static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value,
2102                                             struct mlx5dr_ste_build *sb,
2103                                             u8 *hw_ste_p)
2104 {
2105         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2106         struct mlx5dr_match_misc3 *misc3 = &value->misc3;
2107         u8 *tag = hw_ste->tag;
2108 
2109         if (misc3->outer_vxlan_gpe_flags ||
2110             misc3->outer_vxlan_gpe_next_protocol) {
2111                 MLX5_SET(ste_flex_parser_tnl, tag,
2112                          flex_parser_tunneling_header_63_32,
2113                          (misc3->outer_vxlan_gpe_flags << 24) |
2114                          (misc3->outer_vxlan_gpe_next_protocol));
2115                 misc3->outer_vxlan_gpe_flags = 0;
2116                 misc3->outer_vxlan_gpe_next_protocol = 0;
2117         }
2118 
2119         if (misc3->outer_vxlan_gpe_vni) {
2120                 MLX5_SET(ste_flex_parser_tnl, tag,
2121                          flex_parser_tunneling_header_31_0,
2122                          misc3->outer_vxlan_gpe_vni << 8);
2123                 misc3->outer_vxlan_gpe_vni = 0;
2124         }
2125 
2126         return 0;
2127 }
2128 
2129 void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
2130                                       struct mlx5dr_match_param *mask,
2131                                       bool inner, bool rx)
2132 {
2133         dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask);
2134 
2135         sb->rx = rx;
2136         sb->inner = inner;
2137         sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
2138         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2139         sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag;
2140 }
2141 
2142 static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
2143                                              u8 *bit_mask)
2144 {
2145         struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2146 
2147         DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
2148                           misc_2_mask, metadata_reg_c_0);
2149         DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
2150                           misc_2_mask, metadata_reg_c_1);
2151         DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
2152                           misc_2_mask, metadata_reg_c_2);
2153         DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
2154                           misc_2_mask, metadata_reg_c_3);
2155 }
2156 
2157 static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
2158                                        struct mlx5dr_ste_build *sb,
2159                                        u8 *hw_ste_p)
2160 {
2161         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2162         struct mlx5dr_match_misc2 *misc2 = &value->misc2;
2163         u8 *tag = hw_ste->tag;
2164 
2165         DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
2166         DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
2167         DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
2168         DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
2169 
2170         return 0;
2171 }
2172 
2173 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
2174                                  struct mlx5dr_match_param *mask,
2175                                  bool inner, bool rx)
2176 {
2177         dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
2178 
2179         sb->rx = rx;
2180         sb->inner = inner;
2181         sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
2182         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2183         sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
2184 }
2185 
2186 static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
2187                                              u8 *bit_mask)
2188 {
2189         struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
2190 
2191         DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
2192                           misc_2_mask, metadata_reg_c_4);
2193         DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
2194                           misc_2_mask, metadata_reg_c_5);
2195         DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
2196                           misc_2_mask, metadata_reg_c_6);
2197         DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
2198                           misc_2_mask, metadata_reg_c_7);
2199 }
2200 
2201 static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
2202                                        struct mlx5dr_ste_build *sb,
2203                                        u8 *hw_ste_p)
2204 {
2205         struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
2206         struct mlx5dr_match_misc2 *misc2 = &value->misc2;
2207         u8 *tag = hw_ste->tag;
2208 
2209         DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
2210         DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
2211         DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
2212         DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
2213 
2214         return 0;
2215 }
2216 
2217 void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
2218                                  struct mlx5dr_match_param *mask,
2219                                  bool inner, bool rx)
2220 {
2221         dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
2222 
2223         sb->rx = rx;
2224         sb->inner = inner;
2225         sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
2226         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2227         sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
2228 }
2229 
2230 static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
2231                                               u8 *bit_mask)
2232 {
2233         struct mlx5dr_match_misc *misc_mask = &value->misc;
2234 
2235         /* Partial misc source_port is not supported */
2236         if (misc_mask->source_port && misc_mask->source_port != 0xffff)
2237                 return -EINVAL;
2238 
2239         /* Partial misc source_eswitch_owner_vhca_id is not supported */
2240         if (misc_mask->source_eswitch_owner_vhca_id &&
2241             misc_mask->source_eswitch_owner_vhca_id != 0xffff)
2242                 return -EINVAL;
2243 
2244         DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
2245         DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
2246         misc_mask->source_eswitch_owner_vhca_id = 0;
2247 
2248         return 0;
2249 }
2250 
/* Build the src_gvmi_qp STE tag: writes the source SQN and translates
 * the matcher's source_port into the port's GVMI via the vport caps.
 * Returns 0 on success, -EINVAL if the eswitch owner vhca id does not
 * belong to this domain or its peer, or if the vport is unknown.
 */
static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					 struct mlx5dr_ste_build *sb,
					 u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_cmd_caps *caps;
	u8 *bit_mask = sb->bit_mask;
	u8 *tag = hw_ste->tag;
	bool source_gvmi_set;

	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			caps = &dmn->info.caps;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			caps = &dmn->peer_dmn->info.caps;
		else
			return -EINVAL;
	} else {
		/* No owner vhca id in the mask: use this domain's caps */
		caps = &dmn->info.caps;
	}

	vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
	if (!vport_cap)
		return -EINVAL;

	/* Only write the GVMI if the mask actually selected source_gvmi */
	source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
	if (vport_cap->vport_gvmi && source_gvmi_set)
		MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);

	/* Fields are consumed here; clear them so later builders skip them */
	misc->source_eswitch_owner_vhca_id = 0;
	misc->source_port = 0;

	return 0;
}
2292 
2293 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
2294                                   struct mlx5dr_match_param *mask,
2295                                   struct mlx5dr_domain *dmn,
2296                                   bool inner, bool rx)
2297 {
2298         int ret;
2299 
2300         /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
2301         sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
2302 
2303         ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
2304         if (ret)
2305                 return ret;
2306 
2307         sb->rx = rx;
2308         sb->dmn = dmn;
2309         sb->inner = inner;
2310         sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
2311         sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2312         sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
2313 
2314         return 0;
2315 }

/* [<][>][^][v][top][bottom][index][help] */