root/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. mlx5_fpga_is_ipsec_device
  2. mlx5_fpga_ipsec_send_complete
  3. syndrome_to_errno
  4. mlx5_fpga_ipsec_recv
  5. mlx5_fpga_ipsec_cmd_exec
  6. mlx5_fpga_ipsec_cmd_wait
  7. is_v2_sadb_supported
  8. mlx5_fpga_ipsec_update_hw_sa
  9. mlx5_fpga_ipsec_device_caps
  10. mlx5_fpga_ipsec_counters_count
  11. mlx5_fpga_ipsec_counters_read
  12. mlx5_fpga_ipsec_set_caps
  13. mlx5_fpga_ipsec_enable_supported_caps
  14. mlx5_fpga_ipsec_build_hw_xfrm
  15. mlx5_fpga_ipsec_build_hw_sa
  16. is_full_mask
  17. validate_fpga_full_mask
  18. mlx5_is_fpga_ipsec_rule
  19. mlx5_is_fpga_egress_ipsec_rule
  20. mlx5_fpga_ipsec_create_sa_ctx
  21. mlx5_fpga_ipsec_fs_create_sa_ctx
  22. mlx5_fpga_ipsec_release_sa_ctx
  23. mlx5_fpga_ipsec_delete_sa_ctx
  24. _rule_search
  25. rule_search
  26. _rule_insert
  27. rule_insert
  28. _rule_delete
  29. rule_delete
  30. restore_spec_mailbox
  31. modify_spec_mailbox
  32. egress_to_fs_ft
  33. fpga_ipsec_fs_create_flow_group
  34. fpga_ipsec_fs_create_fte
  35. fpga_ipsec_fs_update_fte
  36. fpga_ipsec_fs_delete_fte
  37. mlx5_fpga_ipsec_fs_create_flow_group_egress
  38. mlx5_fpga_ipsec_fs_create_fte_egress
  39. mlx5_fpga_ipsec_fs_update_fte_egress
  40. mlx5_fpga_ipsec_fs_delete_fte_egress
  41. mlx5_fpga_ipsec_fs_create_flow_group_ingress
  42. mlx5_fpga_ipsec_fs_create_fte_ingress
  43. mlx5_fpga_ipsec_fs_update_fte_ingress
  44. mlx5_fpga_ipsec_fs_delete_fte_ingress
  45. mlx5_fs_cmd_get_default_ipsec_fpga_cmds
  46. mlx5_fpga_ipsec_init
  47. destroy_rules_rb
  48. mlx5_fpga_ipsec_cleanup
  49. mlx5_fpga_ipsec_build_fs_cmds
  50. mlx5_fpga_esp_validate_xfrm_attrs
  51. mlx5_fpga_esp_create_xfrm
  52. mlx5_fpga_esp_destroy_xfrm
  53. mlx5_fpga_esp_modify_xfrm

   1 /*
   2  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
   3  *
   4  * This software is available to you under a choice of one of two
   5  * licenses.  You may choose to be licensed under the terms of the GNU
   6  * General Public License (GPL) Version 2, available from the file
   7  * COPYING in the main directory of this source tree, or the
   8  * OpenIB.org BSD license below:
   9  *
  10  *     Redistribution and use in source and binary forms, with or
  11  *     without modification, are permitted provided that the following
  12  *     conditions are met:
  13  *
  14  *      - Redistributions of source code must retain the above
  15  *        copyright notice, this list of conditions and the following
  16  *        disclaimer.
  17  *
  18  *      - Redistributions in binary form must reproduce the above
  19  *        copyright notice, this list of conditions and the following
  20  *        disclaimer in the documentation and/or other materials
  21  *        provided with the distribution.
  22  *
  23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30  * SOFTWARE.
  31  *
  32  */
  33 
  34 #include <linux/rhashtable.h>
  35 #include <linux/mlx5/driver.h>
  36 #include <linux/mlx5/fs_helpers.h>
  37 #include <linux/mlx5/fs.h>
  38 #include <linux/rbtree.h>
  39 
  40 #include "mlx5_core.h"
  41 #include "fs_cmd.h"
  42 #include "fpga/ipsec.h"
  43 #include "fpga/sdk.h"
  44 #include "fpga/core.h"
  45 
/* Lifecycle states of a command sent to the FPGA IPSec sandbox unit. */
enum mlx5_fpga_ipsec_cmd_status {
	MLX5_FPGA_IPSEC_CMD_PENDING,	/* sent (or sending); response not yet seen */
	MLX5_FPGA_IPSEC_CMD_SEND_FAIL,	/* DMA send failed; no response will arrive */
	MLX5_FPGA_IPSEC_CMD_COMPLETE,	/* response received and recorded */
};
  51 
/* Per-command context allocated by mlx5_fpga_ipsec_cmd_exec() and freed by
 * the caller after mlx5_fpga_ipsec_cmd_wait() (or immediately on send
 * failure).  The variable-length command payload trails the struct.
 */
struct mlx5_fpga_ipsec_cmd_context {
	struct mlx5_fpga_dma_buf buf;		/* DMA buffer handed to the SBU conn */
	enum mlx5_fpga_ipsec_cmd_status status;
	struct mlx5_ifc_fpga_ipsec_cmd_resp resp; /* copy of the HW response */
	int status_code;			/* errno derived from resp syndrome */
	struct completion complete;		/* fired on response or send failure */
	struct mlx5_fpga_device *dev;
	struct list_head list; /* Item in pending_cmds */
	u8 command[0];				/* trailing command payload */
};
  62 
  63 struct mlx5_fpga_esp_xfrm;
  64 
/* One hardware SA as programmed into the FPGA; hashed by hw_sa (minus the
 * cmd field, see rhash_sa) to prevent duplicate SAs for the same IPs/SPI.
 */
struct mlx5_fpga_ipsec_sa_ctx {
	struct rhash_head		hash;	/* node in mlx5_fpga_ipsec::sa_hash */
	struct mlx5_ifc_fpga_ipsec_sa	hw_sa;	/* HW image; also the hash key */
	struct mlx5_core_dev		*dev;
	struct mlx5_fpga_esp_xfrm	*fpga_xfrm; /* owning xfrm, set once bound */
};
  71 
/* FPGA-specific wrapper around an accel ESP xfrm; several flow rules may
 * share one xfrm, all bound to the same sa_ctx.
 */
struct mlx5_fpga_esp_xfrm {
	unsigned int			num_rules;  /* rules referencing sa_ctx */
	struct mlx5_fpga_ipsec_sa_ctx	*sa_ctx;    /* NULL until first rule binds */
	struct mutex			lock; /* xfrm lock */
	struct mlx5_accel_esp_xfrm	accel_xfrm; /* embedded generic xfrm */
};
  78 
/* Associates a flow-table entry with its SA context; node in
 * mlx5_fpga_ipsec::rules_rb, keyed by the fte pointer.
 */
struct mlx5_fpga_ipsec_rule {
	struct rb_node			node;
	struct fs_fte			*fte;	/* rb-tree search key */
	struct mlx5_fpga_ipsec_sa_ctx	*ctx;
};
  84 
/* Hashtable parameters for sa_hash: the key is the hw_sa image with the
 * leading v1 "cmd" word skipped (see comment below).
 */
static const struct rhashtable_params rhash_sa = {
	/* Keep out "cmd" field from the key as its
	 * value is not constant during the lifetime
	 * of the key object.
	 */
	.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
		   FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
		      FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
	.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
  98 
/* Per-device FPGA IPSec state, hung off mlx5_fpga_device::ipsec. */
struct mlx5_fpga_ipsec {
	struct mlx5_fpga_device *fdev;
	struct list_head pending_cmds;	/* commands awaiting a response, FIFO */
	spinlock_t pending_cmds_lock; /* Protects pending_cmds */
	u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)]; /* cached extended caps */
	struct mlx5_fpga_conn *conn;	/* SBU connection used for commands */

	struct notifier_block	fs_notifier_ingress_bypass;
	struct notifier_block	fs_notifier_egress;

	/* Map hardware SA           -->  SA context
	 *     (mlx5_fpga_ipsec_sa)       (mlx5_fpga_ipsec_sa_ctx)
	 * We will use this hash to avoid SAs duplication in fpga which
	 * aren't allowed
	 */
	struct rhashtable sa_hash;	/* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
	struct mutex sa_hash_lock;	/* serializes sa_hash insert/remove */

	/* Tree holding all rules for this fpga device
	 * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
	 */
	struct rb_root rules_rb;
	struct mutex rules_rb_lock; /* rules lock */
};
 123 
 124 static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
 125 {
 126         if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
 127                 return false;
 128 
 129         if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
 130             MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
 131                 return false;
 132 
 133         if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
 134             MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
 135                 return false;
 136 
 137         return true;
 138 }
 139 
 140 static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
 141                                           struct mlx5_fpga_device *fdev,
 142                                           struct mlx5_fpga_dma_buf *buf,
 143                                           u8 status)
 144 {
 145         struct mlx5_fpga_ipsec_cmd_context *context;
 146 
 147         if (status) {
 148                 context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
 149                                        buf);
 150                 mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
 151                                status);
 152                 context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
 153                 complete(&context->complete);
 154         }
 155 }
 156 
 157 static inline
 158 int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
 159 {
 160         switch (syndrome) {
 161         case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
 162                 return 0;
 163         case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
 164                 return -EEXIST;
 165         case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
 166                 return -EINVAL;
 167         case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
 168                 return -EIO;
 169         }
 170         return -EIO;
 171 }
 172 
/* Receive callback for the FPGA IPSec SBU connection.
 *
 * Pops the oldest entry from pending_cmds and treats @buf as its response
 * (commands are queued at the tail in send order, so this pairs responses
 * with commands FIFO — assumes the FPGA answers in order; NOTE(review):
 * ordering guarantee comes from the device, not visible here).  Records the
 * translated syndrome in the context and wakes the waiter.  May run in
 * completion/IRQ context, hence the irqsave locking.
 */
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
	struct mlx5_fpga_ipsec_cmd_context *context;
	enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
	struct mlx5_fpga_device *fdev = cb_arg;
	unsigned long flags;

	/* Drop truncated responses outright. */
	if (buf->sg[0].size < sizeof(*resp)) {
		mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
			       buf->sg[0].size, sizeof(*resp));
		return;
	}

	mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
		      ntohl(resp->syndrome));

	/* Detach the oldest pending command; the waiter frees it later. */
	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
					   struct mlx5_fpga_ipsec_cmd_context,
					   list);
	if (context)
		list_del(&context->list);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (!context) {
		mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
		return;
	}
	mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);

	/* Publish result before waking the waiter in cmd_wait(). */
	syndrome = ntohl(resp->syndrome);
	context->status_code = syndrome_to_errno(syndrome);
	context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
	memcpy(&context->resp, resp, sizeof(*resp));

	if (context->status_code)
		mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
			       syndrome);

	complete(&context->complete);
}
 215 
/* Queue an IPSec command of @cmd_size bytes to the FPGA.
 *
 * Returns the command context on success (caller must wait with
 * mlx5_fpga_ipsec_cmd_wait() and then kfree() it), or ERR_PTR on failure
 * (context already freed).  @cmd_size must be 4-byte aligned.
 *
 * The sendmsg and the pending-list insertion happen under one spinlock so
 * that the list order always matches the on-wire send order — the receive
 * path relies on that to pair responses with commands.  GFP_ATOMIC because
 * callers may not be allowed to sleep here.
 */
static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
				      const void *cmd, int cmd_size)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned long flags;
	int res;

	if (!fdev || !fdev->ipsec)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd_size & 3)
		return ERR_PTR(-EINVAL);

	context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
	if (!context)
		return ERR_PTR(-ENOMEM);

	context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
	context->dev = fdev;
	context->buf.complete = mlx5_fpga_ipsec_send_complete;
	init_completion(&context->complete);
	memcpy(&context->command, cmd, cmd_size);
	context->buf.sg[0].size = cmd_size;
	context->buf.sg[0].data = &context->command;

	/* Send and enqueue atomically to keep list order == send order. */
	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (!res)
		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (res) {
		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
		kfree(context);
		return ERR_PTR(res);
	}

	/* Context should be freed by the caller after completion. */
	return context;
}
 257 
 258 static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
 259 {
 260         struct mlx5_fpga_ipsec_cmd_context *context = ctx;
 261         unsigned long timeout =
 262                 msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
 263         int res;
 264 
 265         res = wait_for_completion_timeout(&context->complete, timeout);
 266         if (!res) {
 267                 mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
 268                 return -ETIMEDOUT;
 269         }
 270 
 271         if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
 272                 res = context->status_code;
 273         else
 274                 res = -EIO;
 275 
 276         return res;
 277 }
 278 
 279 static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
 280 {
 281         if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
 282                 return true;
 283         return false;
 284 }
 285 
/* Send an SA add/delete/modify command (@opcode) carrying @hw_sa to the
 * FPGA and wait for its response.
 *
 * Stamps the opcode into the v1 cmd word, sends either the full v2 image
 * or just the v1 prefix depending on device support, then cross-checks
 * that the response echoes the same sw_sa_handle as the request.
 * Returns 0 on success or a negative errno.  Note: the cmd word in
 * @hw_sa is left set to @opcode on return; the caller clears it.
 */
static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
					struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
					int opcode)
{
	struct mlx5_core_dev *dev = fdev->mdev;
	struct mlx5_ifc_fpga_ipsec_sa *sa;
	struct mlx5_fpga_ipsec_cmd_context *cmd_context;
	size_t sa_cmd_size;
	int err;

	hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
	if (is_v2_sadb_supported(fdev->ipsec))
		sa_cmd_size = sizeof(*hw_sa);
	else
		sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);

	cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
			mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
	if (IS_ERR(cmd_context))
		return PTR_ERR(cmd_context);

	err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
	if (err)
		goto out;

	/* Sanity: the response must refer to the SA we just sent. */
	sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
	if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
		mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
			      ntohl(sa->ipsec_sa_v1.sw_sa_handle),
			      ntohl(cmd_context->resp.sw_sa_handle));
		err = -EIO;
	}

out:
	kfree(cmd_context);
	return err;
}
 323 
 324 u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
 325 {
 326         struct mlx5_fpga_device *fdev = mdev->fpga;
 327         u32 ret = 0;
 328 
 329         if (mlx5_fpga_is_ipsec_device(mdev)) {
 330                 ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
 331                 ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
 332         } else {
 333                 return ret;
 334         }
 335 
 336         if (!fdev->ipsec)
 337                 return ret;
 338 
 339         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
 340                 ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
 341 
 342         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
 343                 ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
 344 
 345         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
 346                 ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
 347 
 348         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
 349                 ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
 350 
 351         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
 352                 ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
 353                 ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
 354         }
 355 
 356         return ret;
 357 }
 358 
 359 unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
 360 {
 361         struct mlx5_fpga_device *fdev = mdev->fpga;
 362 
 363         if (!fdev || !fdev->ipsec)
 364                 return 0;
 365 
 366         return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
 367                         number_of_ipsec_counters);
 368 }
 369 
/* Read the FPGA IPSec counters into @counters (up to @counters_count
 * entries).
 *
 * The counter block address is assembled from the low/high words in the
 * extended caps.  Each HW counter is a pair of big-endian 32-bit words,
 * low word first, which are recombined into host-endian u64s.  All HW
 * counters are read even if the caller asked for fewer; only the first
 * min(count, counters_count) are copied out.  Returns 0 on success or a
 * negative errno.
 */
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
				  unsigned int counters_count)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned int i;
	__be32 *data;
	u32 count;
	u64 addr;
	int ret;

	if (!fdev || !fdev->ipsec)
		return 0;

	addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_low) +
	       ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_high) << 32);

	count = mlx5_fpga_ipsec_counters_count(mdev);

	/* Two __be32 words per counter. */
	data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
				 MLX5_FPGA_ACCESS_TYPE_DONTCARE);
	if (ret < 0) {
		mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
			      ret);
		goto out;
	}
	ret = 0;

	if (count > counters_count)
		count = counters_count;

	/* Each counter is low word, then high. But each word is big-endian */
	for (i = 0; i < count; i++)
		counters[i] = (u64)ntohl(data[i * 2]) |
			      ((u64)ntohl(data[i * 2 + 1]) << 32);

out:
	kfree(data);
	return ret;
}
 417 
/* Send a SET_CAP command enabling the capability bits in @flags.
 *
 * The FPGA echoes the accepted flags in its response; if any requested bit
 * is missing from the echo, the operation is treated as failed (-EIO).
 * Returns 0 on success or a negative errno.
 */
static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
	int err;

	cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
	cmd.flags = htonl(flags);
	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
	if (IS_ERR(context))
		return PTR_ERR(context);

	err = mlx5_fpga_ipsec_cmd_wait(context);
	if (err)
		goto out;

	/* Both sides are big-endian here, so compare without conversion. */
	if ((context->resp.flags & cmd.flags) != cmd.flags) {
		mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
			      cmd.flags,
			      context->resp.flags);
		err = -EIO;
	}

out:
	kfree(context);
	return err;
}
 445 
 446 static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
 447 {
 448         u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
 449         u32 flags = 0;
 450 
 451         if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
 452                 flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
 453 
 454         return mlx5_fpga_ipsec_set_caps(mdev, flags);
 455 }
 456 
 457 static void
 458 mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
 459                               const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
 460                               struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
 461 {
 462         const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
 463 
 464         /* key */
 465         memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
 466                aes_gcm->key_len / 8);
 467         /* Duplicate 128 bit key twice according to HW layout */
 468         if (aes_gcm->key_len == 128)
 469                 memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
 470                        aes_gcm->aes_key, aes_gcm->key_len / 8);
 471 
 472         /* salt and seq_iv */
 473         memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
 474                sizeof(aes_gcm->seq_iv));
 475         memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
 476                sizeof(aes_gcm->salt));
 477 
 478         /* esn */
 479         if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
 480                 hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
 481                 hw_sa->ipsec_sa_v1.flags |=
 482                                 (xfrm_attrs->flags &
 483                                  MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
 484                                         MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
 485                 hw_sa->esn = htonl(xfrm_attrs->esn);
 486         } else {
 487                 hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
 488                 hw_sa->ipsec_sa_v1.flags &=
 489                                 ~(xfrm_attrs->flags &
 490                                   MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
 491                                         MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
 492                 hw_sa->esn = 0;
 493         }
 494 
 495         /* rx handle */
 496         hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
 497 
 498         /* enc mode */
 499         switch (aes_gcm->key_len) {
 500         case 128:
 501                 hw_sa->ipsec_sa_v1.enc_mode =
 502                         MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
 503                 break;
 504         case 256:
 505                 hw_sa->ipsec_sa_v1.enc_mode =
 506                         MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
 507                 break;
 508         }
 509 
 510         /* flags */
 511         hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
 512                         MLX5_FPGA_IPSEC_SA_SPI_EN |
 513                         MLX5_FPGA_IPSEC_SA_IP_ESP;
 514 
 515         if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
 516                 hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
 517         else
 518                 hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
 519 }
 520 
/* Build a complete hardware SA image: xfrm-derived fields plus the flow's
 * addresses and SPI.
 *
 * @saddr/@daddr are 4-word big-endian addresses (IPv4 occupies the layout
 * expected by the HW sip/dip fields); @spi is already big-endian.  Must be
 * called on a zero-initialized @hw_sa so the flag ORs start clean.
 */
static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
			    struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			    const __be32 saddr[4],
			    const __be32 daddr[4],
			    const __be32 spi, bool is_ipv6,
			    struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);

	/* IPs */
	memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
	memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));

	/* SPI */
	hw_sa->ipsec_sa_v1.spi = spi;

	/* flags */
	if (is_ipv6)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}
 542 
/* True when all @len bytes at @p are 0xff (a full match mask).
 * Masks are expected to be whole 32-bit fields, hence the alignment WARN.
 */
static bool is_full_mask(const void *p, size_t len)
{
	WARN_ON(len % 4);

	return !memchr_inv(p, 0xff, len);
}
 549 
/* Check that the rule's match masks are exact: the FPGA SADB can only
 * match on fully-specified source/destination addresses and ESP SPI, so
 * partial (prefix) masks cannot be offloaded.
 *
 * Returns true when the src/dst address masks (IPv4 or IPv6, chosen by the
 * outer-header criteria) and the outer_esp_spi mask are all-ones.
 */
static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
				    const u32 *match_c,
				    const u32 *match_v)
{
	const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
						 match_c,
						 misc_parameters);
	const void *headers_c = MLX5_ADDR_OF(fte_match_param,
					     match_c,
					     outer_headers);
	const void *headers_v = MLX5_ADDR_OF(fte_match_param,
					     match_v,
					     outer_headers);

	if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
		const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv4_layout.ipv4);
		const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

		if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)) ||
		    !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)))
			return false;
	} else {
		/* Not IPv4: callers guarantee this is an IPv6 flow. */
		const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv6_layout.ipv6);
		const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv6_layout.ipv6);

		if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)) ||
		    !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)))
			return false;
	}

	if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				       outer_esp_spi),
			  MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
		return false;

	return true;
}
 599 
/* Decide whether a flow rule is eligible for FPGA IPSec offload.
 *
 * Requires: outer-header matching enabled, a plain IPv4 or IPv6 flow (no
 * UDP/TCP/VXLAN matching), an FPGA IPSec-capable device whose caps cover
 * the rule (ESP matching, IPv6), and full address/SPI masks.
 */
static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
				    u8 match_criteria_enable,
				    const u32 *match_c,
				    const u32 *match_v)
{
	u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
	bool ipv6_flow;

	ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);

	/* Must match on outer headers of a bare IPv4/IPv6 flow. */
	if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
	    mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
	    mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
	    mlx5_fs_is_vxlan_flow(match_c) ||
	    !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
	      ipv6_flow))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
	    mlx5_fs_is_outer_ipsec_flow(match_c))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
	    ipv6_flow)
		return false;

	if (!validate_fpga_full_mask(dev, match_c, match_v))
		return false;

	return true;
}
 634 
/* Egress-specific eligibility check on top of mlx5_is_fpga_ipsec_rule().
 *
 * Egress offload additionally forbids MAC matching, any match criteria
 * beyond outer headers + misc parameters, actions other than
 * ENCRYPT/ALLOW, and flow tags.
 */
static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
					   u8 match_criteria_enable,
					   const u32 *match_c,
					   const u32 *match_v,
					   struct mlx5_flow_act *flow_act,
					   struct mlx5_flow_context *flow_context)
{
	const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);
	bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
	bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
	int ret;

	ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
				      match_v);
	if (!ret)
		return ret;

	if (is_dmac || is_smac ||
	    (match_criteria_enable &
	     ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
	    (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
	     (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
		return false;

	return true;
}
 664 
/* Create (or reuse) the SA context for @accel_xfrm with the given flow
 * addresses and SPI.
 *
 * If the xfrm already has an sa_ctx, the new candidate must be byte-equal
 * to it (same IPs/SPI/keymat) and the existing context is reused with its
 * rule count bumped.  Otherwise the candidate is inserted into the global
 * sa_hash (rejecting duplicates across xfrms) and programmed into the FPGA.
 *
 * Returns the sa_ctx on success or an ERR_PTR: -EINVAL on a mismatched
 * reuse, -EEXIST if another xfrm already owns this SA, or the FPGA command
 * error.  Lock order: fpga_xfrm->lock outside fipsec->sa_hash_lock.
 */
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
				    struct mlx5_accel_esp_xfrm *accel_xfrm,
				    const __be32 saddr[4],
				    const __be32 daddr[4],
				    const __be32 spi, bool is_ipv6)
{
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(accel_xfrm, typeof(*fpga_xfrm),
				     accel_xfrm);
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode, err;
	void *context;

	/* alloc SA */
	sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
	if (!sa_ctx)
		return ERR_PTR(-ENOMEM);

	sa_ctx->dev = mdev;

	/* build candidate SA */
	mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
				    saddr, daddr, spi, is_ipv6,
				    &sa_ctx->hw_sa);

	mutex_lock(&fpga_xfrm->lock);

	if (fpga_xfrm->sa_ctx) {        /* multiple rules for same accel_xfrm */
		/* all rules must be with same IPs and SPI */
		if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
			   sizeof(sa_ctx->hw_sa))) {
			context = ERR_PTR(-EINVAL);
			goto exists;
		}

		++fpga_xfrm->num_rules;
		context = fpga_xfrm->sa_ctx;
		goto exists;
	}

	/* This is unbounded fpga_xfrm, try to add to hash */
	mutex_lock(&fipsec->sa_hash_lock);

	err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
					    rhash_sa);
	if (err) {
		/* Can't bound different accel_xfrm to already existing sa_ctx.
		 * This is because we can't support multiple keymats for
		 * same IPs and SPI
		 */
		context = ERR_PTR(-EEXIST);
		goto unlock_hash;
	}

	/* Bound accel_xfrm to sa_ctx */
	opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	/* Clear the cmd word so hw_sa hashes/compares consistently later. */
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		context = ERR_PTR(err);
		goto delete_hash;
	}

	mutex_unlock(&fipsec->sa_hash_lock);

	++fpga_xfrm->num_rules;
	fpga_xfrm->sa_ctx = sa_ctx;
	sa_ctx->fpga_xfrm = fpga_xfrm;

	mutex_unlock(&fpga_xfrm->lock);

	return sa_ctx;

delete_hash:
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
unlock_hash:
	mutex_unlock(&fipsec->sa_hash_lock);

exists:
	mutex_unlock(&fpga_xfrm->lock);
	kfree(sa_ctx);
	return context;
}
 753 
/*
 * Derive the SA parameters (source/destination IP, SPI, IP version) from a
 * flow-table entry and its flow group, then create/bind the FPGA SA
 * context via mlx5_fpga_ipsec_create_sa_ctx().
 *
 * Returns the sa_ctx pointer on success, or an ERR_PTR() on failure
 * (never NULL).
 */
static void *
mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
				 struct fs_fte *fte,
				 bool is_egress)
{
	struct mlx5_accel_esp_xfrm *accel_xfrm;
	__be32 saddr[4], daddr[4], spi;
	struct mlx5_flow_group *fg;
	bool is_ipv6 = false;

	fs_get_obj(fg, fte->node.parent);
	/* validate: egress rules have extra action/flow-context constraints */
	if (is_egress &&
	    !mlx5_is_fpga_egress_ipsec_rule(mdev,
					    fg->mask.match_criteria_enable,
					    fg->mask.match_criteria,
					    fte->val,
					    &fte->action,
					    &fte->flow_context))
		return ERR_PTR(-EINVAL);
	else if (!mlx5_is_fpga_ipsec_rule(mdev,
					  fg->mask.match_criteria_enable,
					  fg->mask.match_criteria,
					  fte->val))
		return ERR_PTR(-EINVAL);

	/* get xfrm context: esp_id carries the accel_xfrm pointer */
	accel_xfrm =
		(struct mlx5_accel_esp_xfrm *)fte->action.esp_id;

	/* IPs: for IPv4 only word 3 of the 4-word arrays is filled in.
	 * NOTE(review): words 0-2 of saddr/daddr stay uninitialized in the
	 * IPv4 case — presumably ignored by the HW-SA builder; confirm.
	 */
	if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
				       fte->val)) {
		memcpy(&saddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
				    sizeof(saddr[3]));
		memcpy(&daddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
				    sizeof(daddr[3]));
	} else {
		memcpy(saddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
				    sizeof(saddr));
		memcpy(daddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
				    sizeof(daddr));
		is_ipv6 = true;
	}

	/* SPI */
	spi = MLX5_GET_BE(typeof(spi),
			  fte_match_param, fte->val,
			  misc_parameters.outer_esp_spi);

	/* create */
	return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
					     saddr, daddr,
					     spi, is_ipv6);
}
 821 
 822 static void
 823 mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
 824 {
 825         struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
 826         struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
 827         int opcode = is_v2_sadb_supported(fdev->ipsec) ?
 828                         MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
 829                         MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
 830         int err;
 831 
 832         err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
 833         sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
 834         if (err) {
 835                 WARN_ON(err);
 836                 return;
 837         }
 838 
 839         mutex_lock(&fipsec->sa_hash_lock);
 840         WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
 841                                        rhash_sa));
 842         mutex_unlock(&fipsec->sa_hash_lock);
 843 }
 844 
 845 void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
 846 {
 847         struct mlx5_fpga_esp_xfrm *fpga_xfrm =
 848                         ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
 849 
 850         mutex_lock(&fpga_xfrm->lock);
 851         if (!--fpga_xfrm->num_rules) {
 852                 mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
 853                 kfree(fpga_xfrm->sa_ctx);
 854                 fpga_xfrm->sa_ctx = NULL;
 855         }
 856         mutex_unlock(&fpga_xfrm->lock);
 857 }
 858 
 859 static inline struct mlx5_fpga_ipsec_rule *
 860 _rule_search(struct rb_root *root, struct fs_fte *fte)
 861 {
 862         struct rb_node *node = root->rb_node;
 863 
 864         while (node) {
 865                 struct mlx5_fpga_ipsec_rule *rule =
 866                                 container_of(node, struct mlx5_fpga_ipsec_rule,
 867                                              node);
 868 
 869                 if (rule->fte < fte)
 870                         node = node->rb_left;
 871                 else if (rule->fte > fte)
 872                         node = node->rb_right;
 873                 else
 874                         return rule;
 875         }
 876         return NULL;
 877 }
 878 
 879 static struct mlx5_fpga_ipsec_rule *
 880 rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
 881 {
 882         struct mlx5_fpga_ipsec_rule *rule;
 883 
 884         mutex_lock(&ipsec_dev->rules_rb_lock);
 885         rule = _rule_search(&ipsec_dev->rules_rb, fte);
 886         mutex_unlock(&ipsec_dev->rules_rb_lock);
 887 
 888         return rule;
 889 }
 890 
/*
 * Insert @rule into the rb-tree, keyed by the fte pointer value:
 * smaller pointers go to the left subtree, larger to the right.
 * Returns -EEXIST if an entry for the same fte is already present.
 * Caller must hold rules_rb_lock.
 */
static inline int _rule_insert(struct rb_root *root,
			       struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct mlx5_fpga_ipsec_rule *this =
				container_of(*new, struct mlx5_fpga_ipsec_rule,
					     node);

		parent = *new;
		if (rule->fte < this->fte)
			new = &((*new)->rb_left);
		else if (rule->fte > this->fte)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&rule->node, parent, new);
	rb_insert_color(&rule->node, root);

	return 0;
}
 917 
 918 static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
 919                        struct mlx5_fpga_ipsec_rule *rule)
 920 {
 921         int ret;
 922 
 923         mutex_lock(&ipsec_dev->rules_rb_lock);
 924         ret = _rule_insert(&ipsec_dev->rules_rb, rule);
 925         mutex_unlock(&ipsec_dev->rules_rb_lock);
 926 
 927         return ret;
 928 }
 929 
 930 static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
 931                                 struct mlx5_fpga_ipsec_rule *rule)
 932 {
 933         struct rb_root *root = &ipsec_dev->rules_rb;
 934 
 935         mutex_lock(&ipsec_dev->rules_rb_lock);
 936         rb_erase(&rule->node, root);
 937         mutex_unlock(&ipsec_dev->rules_rb_lock);
 938 }
 939 
/* Unlink @rule from the tree and free it. */
static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
			struct mlx5_fpga_ipsec_rule *rule)
{
	_rule_delete(ipsec_dev, rule);
	kfree(rule);
}
 946 
/*
 * FTE fields saved by modify_spec_mailbox() before a firmware call and
 * put back by restore_spec_mailbox() afterwards.
 */
struct mailbox_mod {
	uintptr_t			saved_esp_id;			/* fte->action.esp_id */
	u32				saved_action;			/* ENCRYPT/DECRYPT action bits */
	u32				saved_outer_esp_spi_value;	/* SPI match value */
};
 952 
/*
 * Undo modify_spec_mailbox(): put back the SPI match value, the
 * ENCRYPT/DECRYPT action bits and the esp_id saved in @mbox_mod.
 */
static void restore_spec_mailbox(struct fs_fte *fte,
				 struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
		 mbox_mod->saved_outer_esp_spi_value);
	fte->action.action |= mbox_mod->saved_action;
	fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
}
 965 
/*
 * Strip the SW-only IPsec bits from @fte before it is sent to firmware,
 * saving them into @mbox_mod: esp_id, the ENCRYPT/DECRYPT action bits,
 * and — if the device cannot match on outer_esp_spi — the SPI match
 * value itself.  restore_spec_mailbox() reverses this.
 */
static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
				struct fs_fte *fte,
				struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	mbox_mod->saved_esp_id = fte->action.esp_id;
	mbox_mod->saved_action = fte->action.action &
			(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
			 MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	mbox_mod->saved_outer_esp_spi_value =
			MLX5_GET(fte_match_set_misc, misc_params_v,
				 outer_esp_spi);

	fte->action.esp_id = 0;
	fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	/* only clear the SPI match when HW cannot match on it anyway */
	if (!MLX5_CAP_FLOWTABLE(mdev,
				flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}
 989 
 990 static enum fs_flow_table_type egress_to_fs_ft(bool egress)
 991 {
 992         return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
 993 }
 994 
/*
 * create_flow_group hook shared by ingress and egress.
 *
 * If the device can match on outer_esp_spi, just forward to the default
 * command set.  Otherwise temporarily clear the SPI mask (and, when no
 * other misc-parameter bits remain, the MISC match-criteria flag) around
 * the firmware call, then restore the mailbox so the caller's view of
 * @in is unchanged.
 */
static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg,
					   bool is_egress)
{
	int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_table *ft, u32 *in,
				 struct mlx5_flow_group *fg) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
	char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
					   match_criteria.misc_parameters);
	struct mlx5_core_dev *dev = ns->dev;
	u32 saved_outer_esp_spi_mask;
	u8 match_criteria_enable;
	int ret;

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		return create_flow_group(ns, ft, in, fg);

	match_criteria_enable =
		MLX5_GET(create_flow_group_in, in, match_criteria_enable);
	saved_outer_esp_spi_mask =
		MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
	if (!match_criteria_enable || !saved_outer_esp_spi_mask)
		return create_flow_group(ns, ft, in, fg);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);

	/* if the whole misc-parameter block is now zero, drop the MISC flag */
	if (!(*misc_params_c) &&
	    !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
		MLX5_SET(create_flow_group_in, in, match_criteria_enable,
			 match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);

	ret = create_flow_group(ns, ft, in, fg);

	/* restore the mailbox regardless of the command's outcome */
	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
	MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);

	return ret;
}
1037 
1038 static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
1039                                     struct mlx5_flow_table *ft,
1040                                     struct mlx5_flow_group *fg,
1041                                     struct fs_fte *fte,
1042                                     bool is_egress)
1043 {
1044         int (*create_fte)(struct mlx5_flow_root_namespace *ns,
1045                           struct mlx5_flow_table *ft,
1046                           struct mlx5_flow_group *fg,
1047                           struct fs_fte *fte) =
1048                 mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
1049         struct mlx5_core_dev *dev = ns->dev;
1050         struct mlx5_fpga_device *fdev = dev->fpga;
1051         struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
1052         struct mlx5_fpga_ipsec_rule *rule;
1053         bool is_esp = fte->action.esp_id;
1054         struct mailbox_mod mbox_mod;
1055         int ret;
1056 
1057         if (!is_esp ||
1058             !(fte->action.action &
1059               (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
1060                MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
1061                 return create_fte(ns, ft, fg, fte);
1062 
1063         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1064         if (!rule)
1065                 return -ENOMEM;
1066 
1067         rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
1068         if (IS_ERR(rule->ctx)) {
1069                 int err = PTR_ERR(rule->ctx);
1070                 kfree(rule);
1071                 return err;
1072         }
1073 
1074         rule->fte = fte;
1075         WARN_ON(rule_insert(fipsec, rule));
1076 
1077         modify_spec_mailbox(dev, fte, &mbox_mod);
1078         ret = create_fte(ns, ft, fg, fte);
1079         restore_spec_mailbox(fte, &mbox_mod);
1080         if (ret) {
1081                 _rule_delete(fipsec, rule);
1082                 mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
1083                 kfree(rule);
1084         }
1085 
1086         return ret;
1087 }
1088 
/*
 * update_fte hook shared by ingress and egress: non-IPsec FTEs are
 * forwarded to the default command set; IPsec FTEs have their SW-only
 * fields stripped around the firmware call and restored afterwards.
 */
static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    int modify_mask,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*update_fte)(struct mlx5_flow_root_namespace *ns,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  int modify_mask,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
	struct mlx5_core_dev *dev = ns->dev;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return update_fte(ns, ft, fg, modify_mask, fte);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = update_fte(ns, ft, fg, modify_mask, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}
1119 
/*
 * delete_fte hook shared by ingress and egress.
 *
 * For IPsec FTEs: look up the tracked rule, release its SA context and
 * tree entry, then delete the FTE from firmware with the SW-only fields
 * stripped.
 *
 * NOTE(review): the SA context and rule are released *before* the
 * firmware delete; if delete_fte() then fails, the FTE still exists on
 * the device but its SA tracking is already gone — confirm this ordering
 * is intentional.
 */
static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
			  struct mlx5_flow_table *ft,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
	struct mlx5_core_dev *dev = ns->dev;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return delete_fte(ns, ft, fte);

	rule = rule_search(fipsec, fte);
	if (!rule)
		return -ENOENT;

	mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
	rule_delete(fipsec, rule);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = delete_fte(ns, ft, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}
1156 
/* Egress (NIC TX) wrapper around fpga_ipsec_fs_create_flow_group(). */
static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    u32 *in,
					    struct mlx5_flow_group *fg)
{
	return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
}
1165 
/* Egress (NIC TX) wrapper around fpga_ipsec_fs_create_fte(). */
static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
}
1174 
/* Egress (NIC TX) wrapper around fpga_ipsec_fs_update_fte(). */
static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     int modify_mask,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
					true);
}
1185 
/* Egress (NIC TX) wrapper around fpga_ipsec_fs_delete_fte(). */
static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
				     struct mlx5_flow_table *ft,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
}
1193 
/* Ingress (NIC RX) wrapper around fpga_ipsec_fs_create_flow_group(). */
static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_flow_table *ft,
					     u32 *in,
					     struct mlx5_flow_group *fg)
{
	return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
}
1202 
/* Ingress (NIC RX) wrapper around fpga_ipsec_fs_create_fte(). */
static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
}
1211 
/* Ingress (NIC RX) wrapper around fpga_ipsec_fs_update_fte(). */
static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      int modify_mask,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
					false);
}
1222 
/* Ingress (NIC RX) wrapper around fpga_ipsec_fs_delete_fte(). */
static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
}
1230 
1231 static struct mlx5_flow_cmds fpga_ipsec_ingress;
1232 static struct mlx5_flow_cmds fpga_ipsec_egress;
1233 
1234 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
1235 {
1236         switch (type) {
1237         case FS_FT_NIC_RX:
1238                 return &fpga_ipsec_ingress;
1239         case FS_FT_NIC_TX:
1240                 return &fpga_ipsec_egress;
1241         default:
1242                 WARN_ON(true);
1243                 return NULL;
1244         }
1245 }
1246 
/*
 * Initialize FPGA IPsec support for @mdev: query SBU capabilities, open
 * the command connection, set up the SA hash and rules tree, and enable
 * the supported capabilities on the device.
 *
 * Returns 0 (also when the device is not an IPsec FPGA) or a negative
 * errno; on failure all partially-created state is torn down and
 * fdev->ipsec is left NULL.
 */
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn *conn;
	int err;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return 0;

	fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
	if (!fdev->ipsec)
		return -ENOMEM;

	fdev->ipsec->fdev = fdev;

	err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
				     fdev->ipsec->caps);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
			      err);
		goto error;
	}

	INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
	spin_lock_init(&fdev->ipsec->pending_cmds_lock);

	/* command connection to the SBU, completions via mlx5_fpga_ipsec_recv */
	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_ipsec_recv;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
			      err);
		goto error;
	}
	fdev->ipsec->conn = conn;

	err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
	if (err)
		goto err_destroy_conn;
	mutex_init(&fdev->ipsec->sa_hash_lock);

	fdev->ipsec->rules_rb = RB_ROOT;
	mutex_init(&fdev->ipsec->rules_rb_lock);

	err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
			      err);
		goto err_destroy_hash;
	}

	return 0;

/* unwind in reverse order of construction */
err_destroy_hash:
	rhashtable_destroy(&fdev->ipsec->sa_hash);

err_destroy_conn:
	mlx5_fpga_sbu_conn_destroy(conn);

error:
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
	return err;
}
1315 
1316 static void destroy_rules_rb(struct rb_root *root)
1317 {
1318         struct mlx5_fpga_ipsec_rule *r, *tmp;
1319 
1320         rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
1321                 rb_erase(&r->node, root);
1322                 mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
1323                 kfree(r);
1324         }
1325 }
1326 
/*
 * Tear down the state created by mlx5_fpga_ipsec_init(): rules tree,
 * SA hash, command connection and the ipsec object itself.
 */
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return;

	destroy_rules_rb(&fdev->ipsec->rules_rb);
	rhashtable_destroy(&fdev->ipsec->sa_hash);

	mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
}
1341 
1342 void mlx5_fpga_ipsec_build_fs_cmds(void)
1343 {
1344         /* ingress */
1345         fpga_ipsec_ingress.create_flow_table =
1346                 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
1347         fpga_ipsec_ingress.destroy_flow_table =
1348                 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
1349         fpga_ipsec_ingress.modify_flow_table =
1350                 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
1351         fpga_ipsec_ingress.create_flow_group =
1352                 mlx5_fpga_ipsec_fs_create_flow_group_ingress;
1353         fpga_ipsec_ingress.destroy_flow_group =
1354                  mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
1355         fpga_ipsec_ingress.create_fte =
1356                 mlx5_fpga_ipsec_fs_create_fte_ingress;
1357         fpga_ipsec_ingress.update_fte =
1358                 mlx5_fpga_ipsec_fs_update_fte_ingress;
1359         fpga_ipsec_ingress.delete_fte =
1360                 mlx5_fpga_ipsec_fs_delete_fte_ingress;
1361         fpga_ipsec_ingress.update_root_ft =
1362                 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;
1363 
1364         /* egress */
1365         fpga_ipsec_egress.create_flow_table =
1366                 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
1367         fpga_ipsec_egress.destroy_flow_table =
1368                 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
1369         fpga_ipsec_egress.modify_flow_table =
1370                 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
1371         fpga_ipsec_egress.create_flow_group =
1372                 mlx5_fpga_ipsec_fs_create_flow_group_egress;
1373         fpga_ipsec_egress.destroy_flow_group =
1374                 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
1375         fpga_ipsec_egress.create_fte =
1376                 mlx5_fpga_ipsec_fs_create_fte_egress;
1377         fpga_ipsec_egress.update_fte =
1378                 mlx5_fpga_ipsec_fs_update_fte_egress;
1379         fpga_ipsec_egress.delete_fte =
1380                 mlx5_fpga_ipsec_fs_delete_fte_egress;
1381         fpga_ipsec_egress.update_root_ft =
1382                 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
1383 }
1384 
1385 static int
1386 mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
1387                                   const struct mlx5_accel_esp_xfrm_attrs *attrs)
1388 {
1389         if (attrs->tfc_pad) {
1390                 mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
1391                 return -EOPNOTSUPP;
1392         }
1393 
1394         if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
1395                 mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
1396                 return -EOPNOTSUPP;
1397         }
1398 
1399         if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
1400                 mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
1401                 return -EOPNOTSUPP;
1402         }
1403 
1404         if (attrs->keymat.aes_gcm.iv_algo !=
1405             MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
1406                 mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
1407                 return -EOPNOTSUPP;
1408         }
1409 
1410         if (attrs->keymat.aes_gcm.icv_len != 128) {
1411                 mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
1412                 return -EOPNOTSUPP;
1413         }
1414 
1415         if (attrs->keymat.aes_gcm.key_len != 128 &&
1416             attrs->keymat.aes_gcm.key_len != 256) {
1417                 mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
1418                 return -EOPNOTSUPP;
1419         }
1420 
1421         if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
1422             (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
1423                        v2_command))) {
1424                 mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
1425                 return -EOPNOTSUPP;
1426         }
1427 
1428         return 0;
1429 }
1430 
/*
 * Allocate an FPGA esp xfrm object for the given attributes.
 *
 * Requires MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA and attributes that
 * pass mlx5_fpga_esp_validate_xfrm_attrs().  Returns the embedded
 * accel_xfrm on success or an ERR_PTR() on failure.  No SA is created
 * here; binding happens when rules are added.
 */
struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
			  const struct mlx5_accel_esp_xfrm_attrs *attrs,
			  u32 flags)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;

	if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
		mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
		return ERR_PTR(-EINVAL);
	}

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
	if (!fpga_xfrm)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fpga_xfrm->lock);
	memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
	       sizeof(fpga_xfrm->accel_xfrm.attrs));

	return &fpga_xfrm->accel_xfrm;
}
1458 
/* Free the FPGA esp xfrm wrapping @xfrm. */
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(xfrm, struct mlx5_fpga_esp_xfrm,
				     accel_xfrm);
	/* assuming no sa_ctx are connected to this xfrm_ctx */
	kfree(fpga_xfrm);
}
1467 
1468 int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
1469                               const struct mlx5_accel_esp_xfrm_attrs *attrs)
1470 {
1471         struct mlx5_core_dev *mdev = xfrm->mdev;
1472         struct mlx5_fpga_device *fdev = mdev->fpga;
1473         struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
1474         struct mlx5_fpga_esp_xfrm *fpga_xfrm;
1475         struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
1476 
1477         int err = 0;
1478 
1479         if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
1480                 return 0;
1481 
1482         if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
1483                 mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
1484                 return -EOPNOTSUPP;
1485         }
1486 
1487         if (is_v2_sadb_supported(fipsec)) {
1488                 mlx5_core_warn(mdev, "Modify esp is not supported\n");
1489                 return -EOPNOTSUPP;
1490         }
1491 
1492         fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
1493 
1494         mutex_lock(&fpga_xfrm->lock);
1495 
1496         if (!fpga_xfrm->sa_ctx)
1497                 /* Unbounded xfrm, chane only sw attrs */
1498                 goto change_sw_xfrm_attrs;
1499 
1500         /* copy original hw sa */
1501         memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
1502         mutex_lock(&fipsec->sa_hash_lock);
1503         /* remove original hw sa from hash */
1504         WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
1505                                        &fpga_xfrm->sa_ctx->hash, rhash_sa));
1506         /* update hw_sa with new xfrm attrs*/
1507         mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
1508                                       &fpga_xfrm->sa_ctx->hw_sa);
1509         /* try to insert new hw_sa to hash */
1510         err = rhashtable_insert_fast(&fipsec->sa_hash,
1511                                      &fpga_xfrm->sa_ctx->hash, rhash_sa);
1512         if (err)
1513                 goto rollback_sa;
1514 
1515         /* modify device with new hw_sa */
1516         err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
1517                                            MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
1518         fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
1519         if (err)
1520                 WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
1521                                                &fpga_xfrm->sa_ctx->hash,
1522                                                rhash_sa));
1523 rollback_sa:
1524         if (err) {
1525                 /* return original hw_sa to hash */
1526                 memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
1527                        sizeof(org_hw_sa));
1528                 WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
1529                                                &fpga_xfrm->sa_ctx->hash,
1530                                                rhash_sa));
1531         }
1532         mutex_unlock(&fipsec->sa_hash_lock);
1533 
1534 change_sw_xfrm_attrs:
1535         if (!err)
1536                 memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
1537         mutex_unlock(&fpga_xfrm->lock);
1538         return err;
1539 }

/* [<][>][^][v][top][bottom][index][help] */