root/drivers/net/ethernet/qlogic/qed/qed_iwarp.c


DEFINITIONS

This source file includes the following definitions.
  1. qed_iwarp_init_devinfo
  2. qed_iwarp_init_hw
  3. qed_iwarp_cid_cleaned
  4. qed_iwarp_init_fw_ramrod
  5. qed_iwarp_alloc_cid
  6. qed_iwarp_set_tcp_cid
  7. qed_iwarp_alloc_tcp_cid
  8. qed_iwarp_create_qp
  9. qed_iwarp_modify_fw
  10. qed_roce2iwarp_state
  11. qed_iwarp2roce_state
  12. qed_iwarp_modify_qp
  13. qed_iwarp_fw_destroy
  14. qed_iwarp_destroy_ep
  15. qed_iwarp_destroy_qp
  16. qed_iwarp_create_ep
  17. qed_iwarp_print_tcp_ramrod
  18. qed_iwarp_tcp_offload
  19. qed_iwarp_mpa_received
  20. qed_iwarp_mpa_offload
  21. qed_iwarp_return_ep
  22. qed_iwarp_parse_private_data
  23. qed_iwarp_mpa_reply_arrived
  24. qed_iwarp_mpa_complete
  25. qed_iwarp_mpa_v2_set_private
  26. qed_iwarp_connect
  27. qed_iwarp_get_free_ep
  28. qed_iwarp_wait_cid_map_cleared
  29. qed_iwarp_wait_for_all_cids
  30. qed_iwarp_free_prealloc_ep
  31. qed_iwarp_prealloc_ep
  32. qed_iwarp_alloc
  33. qed_iwarp_resc_free
  34. qed_iwarp_accept
  35. qed_iwarp_reject
  36. qed_iwarp_print_cm_info
  37. qed_iwarp_ll2_post_rx
  38. qed_iwarp_ep_exists
  39. qed_iwarp_get_listener
  40. qed_iwarp_parse_rx_pkt
  41. qed_iwarp_get_curr_fpdu
  42. qed_iwarp_mpa_classify
  43. qed_iwarp_init_fpdu
  44. qed_iwarp_cp_pkt
  45. qed_iwarp_update_fpdu_length
  46. qed_iwarp_recycle_pkt
  47. qed_iwarp_win_right_edge
  48. qed_iwarp_send_fpdu
  49. qed_iwarp_mpa_get_data
  50. qed_iwarp_process_mpa_pkt
  51. qed_iwarp_process_pending_pkts
  52. qed_iwarp_ll2_comp_mpa_pkt
  53. qed_iwarp_ll2_comp_syn_pkt
  54. qed_iwarp_ll2_rel_rx_pkt
  55. qed_iwarp_ll2_comp_tx_pkt
  56. qed_iwarp_ll2_rel_tx_pkt
  57. qed_iwarp_ll2_slowpath
  58. qed_iwarp_ll2_stop
  59. qed_iwarp_ll2_alloc_buffers
  60. qed_iwarp_ll2_start
  61. qed_iwarp_setup
  62. qed_iwarp_stop
  63. qed_iwarp_qp_in_error
  64. qed_iwarp_exception_received
  65. qed_iwarp_tcp_connect_unsuccessful
  66. qed_iwarp_connect_complete
  67. qed_iwarp_check_ep_ok
  68. qed_iwarp_async_event
  69. qed_iwarp_create_listen
  70. qed_iwarp_destroy_listen
  71. qed_iwarp_send_rtr
  72. qed_iwarp_query_qp

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_ooo.h"

#define QED_IWARP_ORD_DEFAULT           32
#define QED_IWARP_IRD_DEFAULT           32
#define QED_IWARP_MAX_FW_MSS            4120

#define QED_EP_SIG 0xecabcdef

struct mpa_v2_hdr {
        __be16 ird;
        __be16 ord;
};

#define MPA_V2_PEER2PEER_MODEL  0x8000
#define MPA_V2_SEND_RTR         0x4000  /* on ird */
#define MPA_V2_READ_RTR         0x4000  /* on ord */
#define MPA_V2_WRITE_RTR        0x8000
#define MPA_V2_IRD_ORD_MASK     0x3FFF

#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)

#define QED_IWARP_INVALID_TCP_CID       0xffffffff

#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)

#define QED_IWARP_RCV_WND_SIZE_MIN      (0xffff)
#define TIMESTAMP_HEADER_SIZE           (12)
#define QED_IWARP_MAX_FIN_RT_DEFAULT    (2)

#define QED_IWARP_TS_EN                 BIT(0)
#define QED_IWARP_DA_EN                 BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED      (1)
#define QED_IWARP_PARAM_P2P             (1)

#define QED_IWARP_DEF_MAX_RT_TIME       (0)
#define QED_IWARP_DEF_CWND_FACTOR       (4)
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT  (5)
#define QED_IWARP_DEF_KA_TIMEOUT        (1200000)       /* 20 min */
#define QED_IWARP_DEF_KA_INTERVAL       (1000)          /* 1 sec */

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
                                 u8 fw_event_code, u16 echo,
                                 union event_ring_data *data,
                                 u8 fw_return_code);

/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
        dev->max_qp = min_t(u32,
                            IWARP_MAX_QPS,
                            p_hwfn->p_rdma_info->num_qps) -
                      QED_IWARP_PREALLOC_CNT;

        dev->max_cq = dev->max_qp;

        dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
        dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}

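/* Enable iWARP in HW: point the parser's RDMA lookup at the TCP search
 * register and turn TCP searching on.
 */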
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
        p_hwfn->b_rdma_enabled_in_prs = true;
}

/* We have two cid maps: one for tcp, which should be used only from passive
 * syn processing and for replacing a pre-allocated ep in the list; the
 * second for active tcp connections and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
        cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);

        if (cid < QED_IWARP_PREALLOC_CNT)
                qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
                                    cid);
        else
                qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void
qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
                         struct iwarp_init_func_ramrod_data *p_ramrod)
{
        p_ramrod->iwarp.ll2_ooo_q_index =
                RESC_START(p_hwfn, QED_LL2_QUEUE) +
                p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;

        p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
}

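/* Allocate a cid from the main cid map (under the rdma lock) and dynamically
 * allocate the matching ILT context. If the ILT allocation fails, the cid is
 * released again via qed_iwarp_cid_cleaned().
 */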
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
                return rc;
        }
        *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
        if (rc)
                qed_iwarp_cid_cleaned(p_hwfn, *cid);

        return rc;
}

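/* Mark a cid as used in the passive tcp cid map; the cid is first converted
 * to be relative to the protocol's cid range.
 */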
static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
        cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

/* This function allocates a cid for passive tcp (called from syn receive).
 * The reason it's separate from the regular cid allocation is that it is
 * assured that these cids already have ilt allocated. They are preallocated
 * to ensure that we won't need to allocate memory during syn processing.
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);

        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_hwfn->p_rdma_info->tcp_cid_map, cid);

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "can't allocate iwarp tcp cid max-count=%d\n",
                           p_hwfn->p_rdma_info->tcp_cid_map.max_count);

                *cid = QED_IWARP_INVALID_TCP_CID;
                return rc;
        }

        *cid += qed_cxt_get_proto_cid_start(p_hwfn,
                                            p_hwfn->p_rdma_info->proto);
        return 0;
}

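/* Create an iWARP QP: allocate the shared queue page (the SQ/RQ PBLs live
 * inside it at fixed offsets), allocate a cid and post the CREATE_QP ramrod.
 * On failure the cid and shared queue page are freed in reverse order.
 */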
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
                        struct qed_rdma_qp *qp,
                        struct qed_rdma_create_qp_out_params *out_params)
{
        struct iwarp_create_qp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u16 physical_queue;
        u32 cid;
        int rc;

        qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                              IWARP_SHARED_QUEUE_PAGE_SIZE,
                                              &qp->shared_queue_phys_addr,
                                              GFP_KERNEL);
        if (!qp->shared_queue)
                return -ENOMEM;

        out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
            IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
        out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
            IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
        out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
            IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
        out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
            IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

        rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
        if (rc)
                goto err1;

        qp->icid = (u16)cid;

        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.cid = qp->icid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_CREATE_QP,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                goto err2;

        p_ramrod = &p_ent->ramrod.iwarp_create_qp;

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

        p_ramrod->pd = qp->pd;
        p_ramrod->sq_num_pages = qp->sq_num_pages;
        p_ramrod->rq_num_pages = qp->rq_num_pages;

        p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);

        p_ramrod->cq_cid_for_sq =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
        p_ramrod->cq_cid_for_rq =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
        p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
        physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
        p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err2;

        return rc;

err2:
        qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          IWARP_SHARED_QUEUE_PAGE_SIZE,
                          qp->shared_queue, qp->shared_queue_phys_addr);

        return rc;
}

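/* Post a MODIFY_QP ramrod to firmware. Only CLOSING and ERROR transitions
 * are ever requested from FW; all other transitions are handled in the
 * driver (see qed_iwarp_modify_qp below).
 */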
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct iwarp_modify_qp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_MODIFY_QP,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
        SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
                  0x1);
        if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
                p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
        else
                p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

        return rc;
}

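/* Translate between the generic RoCE QP state enum and the iWARP QP state
 * machine, in both directions.
 */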
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
        switch (state) {
        case QED_ROCE_QP_STATE_RESET:
        case QED_ROCE_QP_STATE_INIT:
        case QED_ROCE_QP_STATE_RTR:
                return QED_IWARP_QP_STATE_IDLE;
        case QED_ROCE_QP_STATE_RTS:
                return QED_IWARP_QP_STATE_RTS;
        case QED_ROCE_QP_STATE_SQD:
                return QED_IWARP_QP_STATE_CLOSING;
        case QED_ROCE_QP_STATE_ERR:
                return QED_IWARP_QP_STATE_ERROR;
        case QED_ROCE_QP_STATE_SQE:
                return QED_IWARP_QP_STATE_TERMINATE;
        default:
                return QED_IWARP_QP_STATE_ERROR;
        }
}

static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
        switch (state) {
        case QED_IWARP_QP_STATE_IDLE:
                return QED_ROCE_QP_STATE_INIT;
        case QED_IWARP_QP_STATE_RTS:
                return QED_ROCE_QP_STATE_RTS;
        case QED_IWARP_QP_STATE_TERMINATE:
                return QED_ROCE_QP_STATE_SQE;
        case QED_IWARP_QP_STATE_CLOSING:
                return QED_ROCE_QP_STATE_SQD;
        case QED_IWARP_QP_STATE_ERROR:
                return QED_ROCE_QP_STATE_ERR;
        default:
                return QED_ROCE_QP_STATE_ERR;
        }
}

static const char * const iwarp_state_names[] = {
        "IDLE",
        "RTS",
        "TERMINATE",
        "CLOSING",
        "ERROR",
};

int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
                    struct qed_rdma_qp *qp,
                    enum qed_iwarp_qp_state new_state, bool internal)
{
        enum qed_iwarp_qp_state prev_iw_state;
        bool modify_fw = false;
        int rc = 0;

        /* modify QP can be called from the upper layer or as a result of an
         * async RST/FIN... therefore need to protect
         */
        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
        prev_iw_state = qp->iwarp_state;

        if (prev_iw_state == new_state) {
                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
                return 0;
        }

        switch (prev_iw_state) {
        case QED_IWARP_QP_STATE_IDLE:
                switch (new_state) {
                case QED_IWARP_QP_STATE_RTS:
                        qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
                        break;
                case QED_IWARP_QP_STATE_ERROR:
                        qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
                        if (!internal)
                                modify_fw = true;
                        break;
                default:
                        break;
                }
                break;
        case QED_IWARP_QP_STATE_RTS:
                switch (new_state) {
                case QED_IWARP_QP_STATE_CLOSING:
                        if (!internal)
                                modify_fw = true;

                        qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
                        break;
                case QED_IWARP_QP_STATE_ERROR:
                        if (!internal)
                                modify_fw = true;
                        qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
                        break;
                default:
                        break;
                }
                break;
        case QED_IWARP_QP_STATE_ERROR:
                switch (new_state) {
                case QED_IWARP_QP_STATE_IDLE:
                        qp->iwarp_state = new_state;
                        break;
                case QED_IWARP_QP_STATE_CLOSING:
                        /* could happen due to race... do nothing */
                        break;
                default:
                        rc = -EINVAL;
                }
                break;
        case QED_IWARP_QP_STATE_TERMINATE:
        case QED_IWARP_QP_STATE_CLOSING:
                qp->iwarp_state = new_state;
                break;
        default:
                break;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
                   qp->icid,
                   iwarp_state_names[prev_iw_state],
                   iwarp_state_names[qp->iwarp_state],
                   internal ? " internal" : "");

        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

        if (modify_fw)
                rc = qed_iwarp_modify_fw(p_hwfn, qp);

        return rc;
}

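/* Post a DESTROY_QP ramrod and block until it completes (EBLOCK mode). */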
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_DESTROY_QP,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

        return rc;
}

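/* Free an ep's DMA buffer and the ep itself, optionally unlinking it from
 * the active ep list first; the ep is detached from its QP if attached.
 */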
static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
                                 struct qed_iwarp_ep *ep,
                                 bool remove_from_active_list)
{
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(*ep->ep_buffer_virt),
                          ep->ep_buffer_virt, ep->ep_buffer_phys);

        if (remove_from_active_list) {
                spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                list_del(&ep->list_entry);
                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        }

        if (ep->qp)
                ep->qp->ep = NULL;

        kfree(ep);
}

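/* Tear down a QP: move it to ERROR if needed, wait for an associated ep to
 * reach CLOSED (polled in 100ms steps, up to 200 iterations, i.e. ~20
 * seconds), then post the FW destroy and free the shared queue page.
 */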
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct qed_iwarp_ep *ep = qp->ep;
        int wait_count = 0;
        int rc = 0;

        if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
                rc = qed_iwarp_modify_qp(p_hwfn, qp,
                                         QED_IWARP_QP_STATE_ERROR, false);
                if (rc)
                        return rc;
        }

        /* Make sure ep is closed before returning and freeing memory. */
        if (ep) {
                while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
                       wait_count++ < 200)
                        msleep(100);

                if (ep->state != QED_IWARP_EP_CLOSED)
                        DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
                                  ep->state);

                qed_iwarp_destroy_ep(p_hwfn, ep, false);
        }

        rc = qed_iwarp_fw_destroy(p_hwfn, qp);

        if (qp->shared_queue)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  IWARP_SHARED_QUEUE_PAGE_SIZE,
                                  qp->shared_queue, qp->shared_queue_phys_addr);

        return rc;
}

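/* Allocate an ep (endpoint) object and the DMA-coherent buffer used to
 * exchange private data and async output with firmware.
 */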
static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{
        struct qed_iwarp_ep *ep;
        int rc;

        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (!ep)
                return -ENOMEM;

        ep->state = QED_IWARP_EP_INIT;

        ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                                sizeof(*ep->ep_buffer_virt),
                                                &ep->ep_buffer_phys,
                                                GFP_KERNEL);
        if (!ep->ep_buffer_virt) {
                rc = -ENOMEM;
                goto err;
        }

        ep->sig = QED_EP_SIG;

        *ep_out = ep;

        return 0;

err:
        kfree(ep);
        return rc;
}

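/* Dump the fields of a tcp offload ramrod at verbose debug level. */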
static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
                           struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
                   p_tcp_ramrod->tcp.local_mac_addr_lo,
                   p_tcp_ramrod->tcp.local_mac_addr_mid,
                   p_tcp_ramrod->tcp.local_mac_addr_hi,
                   p_tcp_ramrod->tcp.remote_mac_addr_lo,
                   p_tcp_ramrod->tcp.remote_mac_addr_mid,
                   p_tcp_ramrod->tcp.remote_mac_addr_hi);

        if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
                           p_tcp_ramrod->tcp.local_ip,
                           p_tcp_ramrod->tcp.local_port,
                           p_tcp_ramrod->tcp.remote_ip,
                           p_tcp_ramrod->tcp.remote_port,
                           p_tcp_ramrod->tcp.vlan_id);
        } else {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
                           p_tcp_ramrod->tcp.local_ip,
                           p_tcp_ramrod->tcp.local_port,
                           p_tcp_ramrod->tcp.remote_ip,
                           p_tcp_ramrod->tcp.remote_port,
                           p_tcp_ramrod->tcp.vlan_id);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
                   p_tcp_ramrod->tcp.flow_label,
                   p_tcp_ramrod->tcp.ttl,
                   p_tcp_ramrod->tcp.tos_or_tc,
                   p_tcp_ramrod->tcp.mss,
                   p_tcp_ramrod->tcp.rcv_wnd_scale,
                   p_tcp_ramrod->tcp.connect_mode,
                   p_tcp_ramrod->tcp.flags);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
                   p_tcp_ramrod->tcp.syn_ip_payload_length,
                   p_tcp_ramrod->tcp.syn_phy_addr_lo,
                   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}

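/* Build and post the TCP_OFFLOAD ramrod for an ep. Passive connections use
 * callback completion (this is called from SYN processing); active ones
 * block until completion.
 */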
static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
        struct tcp_offload_params_opt2 *tcp;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t async_output_phys;
        dma_addr_t in_pdata_phys;
        u16 physical_q;
        u8 tcp_flags;
        int rc;
        int i;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = ep->tcp_cid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        if (ep->connect_mode == TCP_CONNECT_PASSIVE)
                init_data.comp_mode = QED_SPQ_MODE_CB;
        else
                init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                return rc;

        p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;

        in_pdata_phys = ep->ep_buffer_phys +
                        offsetof(struct qed_iwarp_ep_memory, in_pdata);
        DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
                       in_pdata_phys);

        p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
            cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));

        async_output_phys = ep->ep_buffer_phys +
                            offsetof(struct qed_iwarp_ep_memory, async_output);
        DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
                       async_output_phys);

        p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
        p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
        p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
        p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
        p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

        tcp = &p_tcp_ramrod->tcp;
        qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
                            &tcp->remote_mac_addr_mid,
                            &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
        qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
                            &tcp->local_mac_addr_lo, ep->local_mac_addr);

        tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

        tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
        tcp->flags = 0;
        SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
                  !!(tcp_flags & QED_IWARP_TS_EN));

        SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
                  !!(tcp_flags & QED_IWARP_DA_EN));

        tcp->ip_version = ep->cm_info.ip_version;

        for (i = 0; i < 4; i++) {
                tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
                tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
        }

        tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
        tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
        tcp->mss = cpu_to_le16(ep->mss);
        tcp->flow_label = 0;
        tcp->ttl = 0x40;
        tcp->tos_or_tc = 0;

        tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
        tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
        tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
        tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
        tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;

        tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
        tcp->connect_mode = ep->connect_mode;

        if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
                tcp->syn_ip_payload_length =
                        cpu_to_le16(ep->syn_ip_payload_length);
                tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
                tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
        }

        qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

        return rc;
}

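/* Handle a received MPA request: read the negotiation parameters from the
 * async output buffer, derive ord/ird and the RTR type (MPA v2 only), strip
 * the MPA v2 header from the private data and notify the upper layer with
 * an MPA_REQUEST event.
 */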
static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        struct qed_iwarp_cm_event_params params;
        struct mpa_v2_hdr *mpa_v2;
        union async_output *async_data;
        u16 mpa_ord, mpa_ird;
        u8 mpa_hdr_size = 0;
        u8 mpa_rev;

        async_data = &ep->ep_buffer_virt->async_output;

        mpa_rev = async_data->mpa_request.mpa_handshake_mode;
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
                   async_data->mpa_request.ulp_data_len,
                   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

        if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
                /* Read ord/ird values from private data buffer */
                mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
                mpa_hdr_size = sizeof(*mpa_v2);

                mpa_ord = ntohs(mpa_v2->ord);
                mpa_ird = ntohs(mpa_v2->ird);

                /* Temporarily store the requested incoming ord/ird in
                 * cm_info; replaced with the negotiated values during accept.
                 */
                ep->cm_info.ord = (u8)min_t(u16,
                                            (mpa_ord & MPA_V2_IRD_ORD_MASK),
                                            QED_IWARP_ORD_DEFAULT);

                ep->cm_info.ird = (u8)min_t(u16,
                                            (mpa_ird & MPA_V2_IRD_ORD_MASK),
                                            QED_IWARP_IRD_DEFAULT);

                /* Peer2Peer negotiation */
                ep->rtr_type = MPA_RTR_TYPE_NONE;
                if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
                        if (mpa_ord & MPA_V2_WRITE_RTR)
                                ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;

                        if (mpa_ord & MPA_V2_READ_RTR)
                                ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;

                        if (mpa_ird & MPA_V2_SEND_RTR)
                                ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;

                        ep->rtr_type &= iwarp_info->rtr_type;

                        /* if we're left with no match send our capabilities */
                        if (ep->rtr_type == MPA_RTR_TYPE_NONE)
                                ep->rtr_type = iwarp_info->rtr_type;
                }

                ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
        } else {
                ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
                ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
                ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
                   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
                   async_data->mpa_request.ulp_data_len, mpa_hdr_size);

        /* Strip mpa v2 hdr from private data before sending to upper layer */
        ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;

        ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
                                       mpa_hdr_size;

        params.event = QED_IWARP_EVENT_MPA_REQUEST;
        params.cm_info = &ep->cm_info;
        params.ep_context = ep;
        params.status = 0;

        ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
        ep->event_cb(ep->cb_context, &params);
}

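/* Post the MPA_OFFLOAD ramrod that completes the MPA negotiation. This
 * serves accept, reject (indicated by the absence of an attached QP) and
 * active connect flows alike.
 */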
static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
        struct qed_iwarp_info *iwarp_info;
        struct qed_sp_init_data init_data;
        dma_addr_t async_output_phys;
        struct qed_spq_entry *p_ent;
        dma_addr_t out_pdata_phys;
        dma_addr_t in_pdata_phys;
        struct qed_rdma_qp *qp;
        bool reject;
        int rc;

        if (!ep)
                return -EINVAL;

        qp = ep->qp;
        reject = !qp;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = reject ? ep->tcp_cid : qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

        if (ep->connect_mode == TCP_CONNECT_ACTIVE)
                init_data.comp_mode = QED_SPQ_MODE_CB;
        else
                init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                return rc;

        p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
        out_pdata_phys = ep->ep_buffer_phys +
                         offsetof(struct qed_iwarp_ep_memory, out_pdata);
        DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
                       out_pdata_phys);
        p_mpa_ramrod->common.outgoing_ulp_buffer.len =
            ep->cm_info.private_data_len;
        p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;

        p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
        p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;

        p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;

        in_pdata_phys = ep->ep_buffer_phys +
                        offsetof(struct qed_iwarp_ep_memory, in_pdata);
        p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
        DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
                       in_pdata_phys);
        p_mpa_ramrod->incoming_ulp_buffer.len =
            cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
        async_output_phys = ep->ep_buffer_phys +
                            offsetof(struct qed_iwarp_ep_memory, async_output);
        DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
                       async_output_phys);
        p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
        p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

        if (!reject) {
                DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
                               qp->shared_queue_phys_addr);
                p_mpa_ramrod->stats_counter_id =
                    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
        } else {
                p_mpa_ramrod->common.reject = 1;
        }

        iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
        p_mpa_ramrod->mode = ep->mpa_rev;
        SET_FIELD(p_mpa_ramrod->rtr_pref,
                  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);

        ep->state = QED_IWARP_EP_MPA_OFFLOADED;
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (!reject)
                ep->cid = qp->icid;     /* Now they're migrated. */

        DP_VERBOSE(p_hwfn,
                   QED_MSG_RDMA,
                   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
                   reject ? 0xffff : qp->icid,
                   ep->tcp_cid,
                   rc,
                   ep->cm_info.ird,
                   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
        return rc;
}

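/* Reset an ep and return it to the free list so it can serve a future
 * passive connection; a tcp cid is re-allocated here if the previous one
 * was lost.
 */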
static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        ep->state = QED_IWARP_EP_INIT;
        if (ep->qp)
                ep->qp->ep = NULL;
        ep->qp = NULL;
        memset(&ep->cm_info, 0, sizeof(ep->cm_info));

        if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
                /* We don't care about the return code; it's OK if tcp_cid
                 * remains invalid... in this case we'll defer allocation.
                 */
                qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
        }

        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        list_move_tail(&ep->list_entry,
                       &p_hwfn->p_rdma_info->iwarp.ep_free_list);

        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}

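/* Strip the MPA v2 header from the received private data and record the
 * negotiated values (the peer's advertised ord is stored as our ird and
 * vice versa).
 */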
static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct mpa_v2_hdr *mpa_v2_params;
        union async_output *async_data;
        u16 mpa_ird, mpa_ord;
        u8 mpa_data_size = 0;

        if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
                mpa_v2_params =
                        (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
                mpa_data_size = sizeof(*mpa_v2_params);
                mpa_ird = ntohs(mpa_v2_params->ird);
                mpa_ord = ntohs(mpa_v2_params->ord);

                ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
                ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
        }
        async_data = &ep->ep_buffer_virt->async_output;

        ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
        ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
                                       mpa_data_size;
}

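/* Active side only: deliver the peer's MPA reply (with parsed private data)
 * to the upper layer as an ACTIVE_MPA_REPLY event.
 */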
static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct qed_iwarp_cm_event_params params;

        if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
                DP_NOTICE(p_hwfn,
                          "MPA reply event not expected on passive side!\n");
                return;
        }

        params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;

        qed_iwarp_parse_private_data(p_hwfn, ep);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
                   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

        params.cm_info = &ep->cm_info;
        params.ep_context = ep;
        params.status = 0;

        ep->mpa_reply_processed = true;

        ep->event_cb(ep->cb_context, &params);
}

#define QED_IWARP_CONNECT_MODE_STRING(ep) \
        ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"

/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
                       struct qed_iwarp_ep *ep, u8 fw_return_code)
{
        struct qed_iwarp_cm_event_params params;

        if (ep->connect_mode == TCP_CONNECT_ACTIVE)
                params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
        else
                params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;

        if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
                qed_iwarp_parse_private_data(p_hwfn, ep);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
                   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

        params.cm_info = &ep->cm_info;

        params.ep_context = ep;

        switch (fw_return_code) {
        case RDMA_RETURN_OK:
                ep->qp->max_rd_atomic_req = ep->cm_info.ord;
                ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
                qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
                ep->state = QED_IWARP_EP_ESTABLISHED;
                params.status = 0;
                break;
        case IWARP_CONN_ERROR_MPA_TIMEOUT:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -EBUSY;
                break;
        case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_RST:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
                          ep->tcp_cid);
                params.status = -ECONNRESET;
                break;
        case IWARP_CONN_ERROR_MPA_FIN:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_INSUF_IRD:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_TERMINATE:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        default:
                params.status = -ECONNRESET;
                break;
        }

        if (fw_return_code != RDMA_RETURN_OK)
                /* paired with READ_ONCE in destroy_qp */
                smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

        ep->event_cb(ep->cb_context, &params);

        /* On the passive side, if there is no associated QP (REJECT) we need
         * to return the ep to the pool; in the regular case an element is
         * added back in accept instead of here.
         * In both cases we need to remove it from the ep_list.
         */
        if (fw_return_code != RDMA_RETURN_OK) {
                ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
                if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
                    (!ep->qp)) {        /* Rejected */
                        qed_iwarp_return_ep(p_hwfn, ep);
                } else {
                        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                        list_del(&ep->list_entry);
                        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                }
        }
}

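/* Write the outgoing MPA v2 header (ird/ord plus the peer2peer and RTR bits)
 * at the start of the out_pdata buffer; a no-op for basic MPA (rev 1), in
 * which case *mpa_data_size stays 0.
 */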
static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
                             struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{
        struct mpa_v2_hdr *mpa_v2_params;
        u16 mpa_ird, mpa_ord;

        *mpa_data_size = 0;
        if (MPA_REV2(ep->mpa_rev)) {
                mpa_v2_params =
                    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
                *mpa_data_size = sizeof(*mpa_v2_params);

                mpa_ird = (u16)ep->cm_info.ird;
                mpa_ord = (u16)ep->cm_info.ord;

                if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
                        mpa_ird |= MPA_V2_PEER2PEER_MODEL;

                        if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
                                mpa_ird |= MPA_V2_SEND_RTR;

                        if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
                                mpa_ord |= MPA_V2_WRITE_RTR;

                        if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
                                mpa_ord |= MPA_V2_READ_RTR;
                }

                mpa_v2_params->ird = htons(mpa_ird);
                mpa_v2_params->ord = htons(mpa_ord);

                DP_VERBOSE(p_hwfn,
                           QED_MSG_RDMA,
                           "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
                           mpa_v2_params->ird,
                           mpa_v2_params->ord,
                           *((u32 *)mpa_v2_params),
                           mpa_ord & MPA_V2_IRD_ORD_MASK,
                           mpa_ird & MPA_V2_IRD_ORD_MASK,
                           !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
                           !!(mpa_ird & MPA_V2_SEND_RTR),
                           !!(mpa_ord & MPA_V2_WRITE_RTR),
                           !!(mpa_ord & MPA_V2_READ_RTR));
        }
}

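/* Active-side connect: validate ord/ird against the defaults, allocate a cid
 * and an ep, copy the cm_info and private data (prefixed with the MPA v2
 * header), clamp the mss to QED_IWARP_MAX_FW_MSS and post the TCP offload
 * request.
 */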
int qed_iwarp_connect(void *rdma_cxt,
                      struct qed_iwarp_connect_in *iparams,
                      struct qed_iwarp_connect_out *oparams)
{
        struct qed_hwfn *p_hwfn = rdma_cxt;
        struct qed_iwarp_info *iwarp_info;
        struct qed_iwarp_ep *ep;
        u8 mpa_data_size = 0;
        u32 cid;
        int rc;

        if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
            (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
                DP_NOTICE(p_hwfn,
                          "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
                          iparams->qp->icid, iparams->cm_info.ord,
                          iparams->cm_info.ird);

                return -EINVAL;
        }

        iwarp_info = &p_hwfn->p_rdma_info->iwarp;

        /* Allocate ep object */
        rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
        if (rc)
                return rc;

        rc = qed_iwarp_create_ep(p_hwfn, &ep);
        if (rc)
                goto err;

        ep->tcp_cid = cid;

        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        ep->qp = iparams->qp;
        ep->qp->ep = ep;
        ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
        ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
        memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));

        ep->cm_info.ord = iparams->cm_info.ord;
        ep->cm_info.ird = iparams->cm_info.ird;

        ep->rtr_type = iwarp_info->rtr_type;
        if (!iwarp_info->peer2peer)
                ep->rtr_type = MPA_RTR_TYPE_NONE;

        if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
                ep->cm_info.ord = 1;

        ep->mpa_rev = iwarp_info->mpa_rev;

        qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

        ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
        ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
                                       mpa_data_size;

        memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
               iparams->cm_info.private_data,
               iparams->cm_info.private_data_len);

        ep->mss = iparams->mss;
        ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

        ep->event_cb = iparams->event_cb;
        ep->cb_context = iparams->cb_context;
        ep->connect_mode = TCP_CONNECT_ACTIVE;

        oparams->ep_context = ep;

        rc = qed_iwarp_tcp_offload(p_hwfn, ep);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
                   iparams->qp->icid, ep->tcp_cid, rc);

        if (rc) {
                qed_iwarp_destroy_ep(p_hwfn, ep, true);
                goto err;
        }

        return rc;
err:
        qed_iwarp_cid_cleaned(p_hwfn, cid);

        return rc;
}

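/* Pop an ep from the preallocated free list (used during passive SYN
 * processing); retries the tcp cid allocation if the entry lost its cid.
 */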
1253 static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
1254 {
1255         struct qed_iwarp_ep *ep = NULL;
1256         int rc;
1257 
1258         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1259 
1260         if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1261                 DP_ERR(p_hwfn, "Ep list is empty\n");
1262                 goto out;
1263         }
1264 
1265         ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1266                               struct qed_iwarp_ep, list_entry);
1267 
1268         /* in some cases we could have failed allocating a tcp cid when added
1269          * from accept / failure... retry now..this is not the common case.
1270          */
1271         if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
1272                 rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1273 
1274                 /* if we fail we could look for another entry with a valid
1275                  * tcp_cid, but since we don't expect to reach this anyway
1276                  * it's not worth the handling
1277                  */
1278                 if (rc) {
1279                         ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1280                         ep = NULL;
1281                         goto out;
1282                 }
1283         }
1284 
1285         list_del(&ep->list_entry);
1286 
1287 out:
1288         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1289         return ep;
1290 }
1291 
1292 #define QED_IWARP_MAX_CID_CLEAN_TIME  100
1293 #define QED_IWARP_MAX_NO_PROGRESS_CNT 5
1294 
1295 /* This function waits for all the bits of a bmap to be cleared, as long as
1296  * there is progress ( i.e. the number of bits left to be cleared decreases )
1297  * the function continues.
1298  */
1299 static int
1300 qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
1301 {
1302         int prev_weight = 0;
1303         int wait_count = 0;
1304         int weight = 0;
1305 
1306         weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1307         prev_weight = weight;
1308 
1309         while (weight) {
1310                 msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
1311 
1312                 weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1313 
1314                 if (prev_weight == weight) {
1315                         wait_count++;
1316                 } else {
1317                         prev_weight = weight;
1318                         wait_count = 0;
1319                 }
1320 
1321                 if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
1322                         DP_NOTICE(p_hwfn,
1323                                   "%s bitmap wait timed out (%d cids pending)\n",
1324                                   bmap->name, weight);
1325                         return -EBUSY;
1326                 }
1327         }
1328         return 0;
1329 }
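
/* Worked example with the constants above: the bitmap is polled every 100 ms
 * (QED_IWARP_MAX_CID_CLEAN_TIME); the wait is aborted only after more than
 * QED_IWARP_MAX_NO_PROGRESS_CNT consecutive polls without change, i.e. about
 * 600 ms with no cid cleaned. As long as at least one bit clears between
 * polls, the wait can continue indefinitely.
 */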
1330 
1331 static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
1332 {
1333         int rc;
1334         int i;
1335 
1336         rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
1337                                             &p_hwfn->p_rdma_info->tcp_cid_map);
1338         if (rc)
1339                 return rc;
1340 
1341         /* Now free the tcp cids from the main cid map */
1342         for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
1343                 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
1344 
1345         /* Now wait for all cids to be completed */
1346         return qed_iwarp_wait_cid_map_cleared(p_hwfn,
1347                                               &p_hwfn->p_rdma_info->cid_map);
1348 }
1349 
1350 static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
1351 {
1352         struct qed_iwarp_ep *ep;
1353 
1354         while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1355                 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1356 
1357                 ep = list_first_entry_or_null(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1358                                               struct qed_iwarp_ep, list_entry);
1359 
1360                 if (!ep) {
1361                         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1362                         break;
1363                 }
1364                 list_del(&ep->list_entry);
1365 
1366                 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1367 
1368                 if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
1369                         qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
1370 
1371                 qed_iwarp_destroy_ep(p_hwfn, ep, false);
1372         }
1373 }
1374 
1375 static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
1376 {
1377         struct qed_iwarp_ep *ep;
1378         int rc = 0;
1379         int count;
1380         u32 cid;
1381         int i;
1382 
1383         count = init ? QED_IWARP_PREALLOC_CNT : 1;
1384         for (i = 0; i < count; i++) {
1385                 rc = qed_iwarp_create_ep(p_hwfn, &ep);
1386                 if (rc)
1387                         return rc;
1388 
1389                 /* During initialization we allocate from the main pool,
1390                  * afterwards we allocate only from the tcp_cid.
1391                  */
1392                 if (init) {
1393                         rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1394                         if (rc)
1395                                 goto err;
1396                         qed_iwarp_set_tcp_cid(p_hwfn, cid);
1397                 } else {
1398                         /* We don't care about the return code, it's ok if
1399                          * tcp_cid remains invalid...in this case we'll
1400                          * defer allocation
1401                          */
1402                         qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1403                 }
1404 
1405                 ep->tcp_cid = cid;
1406 
1407                 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1408                 list_add_tail(&ep->list_entry,
1409                               &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1410                 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1411         }
1412 
1413         return rc;
1414 
1415 err:
1416         qed_iwarp_destroy_ep(p_hwfn, ep, false);
1417 
1418         return rc;
1419 }
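
/* Note on the two allocation modes above: at init time QED_IWARP_PREALLOC_CNT
 * eps are created with cids taken from the main cid pool and marked as tcp
 * cids; at runtime (one ep per accept) the cid comes from the small
 * tcp_cid_map, so no dynamic ilt allocation is needed from dpc context.
 */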
1420 
1421 int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1422 {
1423         int rc;
1424 
1425         /* Allocate bitmap for tcp cids. These are used by the passive side
1426          * to ensure it can allocate a tcp cid during dpc from a pool that was
1427          * pre-acquired and doesn't require dynamic allocation of ilt.
1428          */
1429         rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1430                                  QED_IWARP_PREALLOC_CNT, "TCP_CID");
1431         if (rc) {
1432                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1433                            "Failed to allocate tcp cid, rc = %d\n", rc);
1434                 return rc;
1435         }
1436 
1437         INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1438         spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1439 
1440         rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1441         if (rc)
1442                 return rc;
1443 
1444         return qed_ooo_alloc(p_hwfn);
1445 }
1446 
1447 void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1448 {
1449         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1450 
1451         qed_ooo_free(p_hwfn);
1452         qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1453         kfree(iwarp_info->mpa_bufs);
1454         kfree(iwarp_info->partial_fpdus);
1455         kfree(iwarp_info->mpa_intermediate_buf);
1456 }
1457 
1458 int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1459 {
1460         struct qed_hwfn *p_hwfn = rdma_cxt;
1461         struct qed_iwarp_ep *ep;
1462         u8 mpa_data_size = 0;
1463         int rc;
1464 
1465         ep = iparams->ep_context;
1466         if (!ep) {
1467                 DP_ERR(p_hwfn, "EP context received in accept is NULL\n");
1468                 return -EINVAL;
1469         }
1470 
1471         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1472                    iparams->qp->icid, ep->tcp_cid);
1473 
1474         if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1475             (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1476                 DP_VERBOSE(p_hwfn,
1477                            QED_MSG_RDMA,
1478                            "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1479                            iparams->qp->icid,
1480                            ep->tcp_cid, iparams->ord, iparams->ird);
1481                 return -EINVAL;
1482         }
1483 
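        /* Best-effort replenish of the ep free list; a failure here is
         * tolerated, since qed_iwarp_get_free_ep() retries the tcp cid
         * allocation for entries left with an invalid tcp_cid.
         */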
1484         qed_iwarp_prealloc_ep(p_hwfn, false);
1485 
1486         ep->cb_context = iparams->cb_context;
1487         ep->qp = iparams->qp;
1488         ep->qp->ep = ep;
1489 
1490         if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1491                 /* Negotiate ord/ird: if the upper layer requested an ord larger
1492                  * than the ird advertised by the remote, we need to decrease our ord
1493                  */
1494                 if (iparams->ord > ep->cm_info.ird)
1495                         iparams->ord = ep->cm_info.ird;
1496 
1497                 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1498                     (iparams->ird == 0))
1499                         iparams->ird = 1;
1500         }
1501 
1502         /* Update cm_info ord/ird to be negotiated values */
1503         ep->cm_info.ord = iparams->ord;
1504         ep->cm_info.ird = iparams->ird;
1505 
1506         qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1507 
1508         ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1509         ep->cm_info.private_data_len = iparams->private_data_len +
1510                                        mpa_data_size;
1511 
1512         memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1513                iparams->private_data, iparams->private_data_len);
1514 
1515         rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1516         if (rc)
1517                 qed_iwarp_modify_qp(p_hwfn,
1518                                     iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1519 
1520         return rc;
1521 }
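
/* Example of the enhanced-mode negotiation above: if the remote's MPA request
 * advertised ird = 2 (recorded in ep->cm_info.ird on the passive side) and the
 * upper layer accepts with ord = 8, ord is clamped to 2; with a zero-read RTR,
 * an ird of 0 is raised to 1, presumably so the RTR read has a credit to use.
 */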
1522 
1523 int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1524 {
1525         struct qed_hwfn *p_hwfn = rdma_cxt;
1526         struct qed_iwarp_ep *ep;
1527         u8 mpa_data_size = 0;
1528 
1529         ep = iparams->ep_context;
1530         if (!ep) {
1531                 DP_ERR(p_hwfn, "EP context received in reject is NULL\n");
1532                 return -EINVAL;
1533         }
1534 
1535         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1536 
1537         ep->cb_context = iparams->cb_context;
1538         ep->qp = NULL;
1539 
1540         qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1541 
1542         ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1543         ep->cm_info.private_data_len = iparams->private_data_len +
1544                                        mpa_data_size;
1545 
1546         memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1547                iparams->private_data, iparams->private_data_len);
1548 
1549         return qed_iwarp_mpa_offload(p_hwfn, ep);
1550 }
1551 
1552 static void
1553 qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1554                         struct qed_iwarp_cm_info *cm_info)
1555 {
1556         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1557                    cm_info->ip_version);
1558 
1559         if (cm_info->ip_version == QED_TCP_IPV4)
1560                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1561                            "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1562                            cm_info->remote_ip, cm_info->remote_port,
1563                            cm_info->local_ip, cm_info->local_port,
1564                            cm_info->vlan);
1565         else
1566                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1567                            "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1568                            cm_info->remote_ip, cm_info->remote_port,
1569                            cm_info->local_ip, cm_info->local_port,
1570                            cm_info->vlan);
1571 
1572         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1573                    "private_data_len = %x ord = %d, ird = %d\n",
1574                    cm_info->private_data_len, cm_info->ord, cm_info->ird);
1575 }
1576 
1577 static int
1578 qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1579                       struct qed_iwarp_ll2_buff *buf, u8 handle)
1580 {
1581         int rc;
1582 
1583         rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1584                                     (u16)buf->buff_size, buf, 1);
1585         if (rc) {
1586                 DP_NOTICE(p_hwfn,
1587                           "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1588                           rc, handle);
1589                 dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1590                                   buf->data, buf->data_phys_addr);
1591                 kfree(buf);
1592         }
1593 
1594         return rc;
1595 }
1596 
1597 static bool
1598 qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1599 {
1600         struct qed_iwarp_ep *ep = NULL;
1601         bool found = false;
1602 
1603         list_for_each_entry(ep,
1604                             &p_hwfn->p_rdma_info->iwarp.ep_list,
1605                             list_entry) {
1606                 if ((ep->cm_info.local_port == cm_info->local_port) &&
1607                     (ep->cm_info.remote_port == cm_info->remote_port) &&
1608                     (ep->cm_info.vlan == cm_info->vlan) &&
1609                     !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1610                             sizeof(cm_info->local_ip)) &&
1611                     !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1612                             sizeof(cm_info->remote_ip))) {
1613                         found = true;
1614                         break;
1615                 }
1616         }
1617 
1618         if (found) {
1619                 DP_NOTICE(p_hwfn,
1620                           "SYN received on active connection - dropping\n");
1621                 qed_iwarp_print_cm_info(p_hwfn, cm_info);
1622 
1623                 return true;
1624         }
1625 
1626         return false;
1627 }
1628 
1629 static struct qed_iwarp_listener *
1630 qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1631                        struct qed_iwarp_cm_info *cm_info)
1632 {
1633         struct qed_iwarp_listener *listener = NULL;
1634         static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1635         bool found = false;
1636 
1637         qed_iwarp_print_cm_info(p_hwfn, cm_info);
1638 
1639         list_for_each_entry(listener,
1640                             &p_hwfn->p_rdma_info->iwarp.listen_list,
1641                             list_entry) {
1642                 if (listener->port == cm_info->local_port) {
1643                         if (!memcmp(listener->ip_addr,
1644                                     ip_zero, sizeof(ip_zero))) {
1645                                 found = true;
1646                                 break;
1647                         }
1648 
1649                         if (!memcmp(listener->ip_addr,
1650                                     cm_info->local_ip,
1651                                     sizeof(cm_info->local_ip)) &&
1652                             (listener->vlan == cm_info->vlan)) {
1653                                 found = true;
1654                                 break;
1655                         }
1656                 }
1657         }
1658 
1659         if (found) {
1660                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1661                            listener);
1662                 return listener;
1663         }
1664 
1665         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1666         return NULL;
1667 }
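
/* Note the matching order above: a wildcard listener (ip_addr all zeros) on
 * the right port wins first, and it matches regardless of vlan; only the
 * exact-ip branch also compares the vlan.
 */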
1668 
1669 static int
1670 qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1671                        struct qed_iwarp_cm_info *cm_info,
1672                        void *buf,
1673                        u8 *remote_mac_addr,
1674                        u8 *local_mac_addr,
1675                        int *payload_len, int *tcp_start_offset)
1676 {
1677         struct vlan_ethhdr *vethh;
1678         bool vlan_valid = false;
1679         struct ipv6hdr *ip6h;
1680         struct ethhdr *ethh;
1681         struct tcphdr *tcph;
1682         struct iphdr *iph;
1683         int eth_hlen;
1684         int ip_hlen;
1685         int eth_type;
1686         int i;
1687 
1688         ethh = buf;
1689         eth_type = ntohs(ethh->h_proto);
1690         if (eth_type == ETH_P_8021Q) {
1691                 vlan_valid = true;
1692                 vethh = (struct vlan_ethhdr *)ethh;
1693                 cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1694                 eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1695         }
1696 
1697         eth_hlen = ETH_HLEN + (vlan_valid ? VLAN_HLEN : 0);
1698 
1699         if (!ether_addr_equal(ethh->h_dest,
1700                               p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1701                 DP_VERBOSE(p_hwfn,
1702                            QED_MSG_RDMA,
1703                            "Got unexpected mac %pM instead of %pM\n",
1704                            ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
1705                 return -EINVAL;
1706         }
1707 
1708         ether_addr_copy(remote_mac_addr, ethh->h_source);
1709         ether_addr_copy(local_mac_addr, ethh->h_dest);
1710 
1711         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type=%d source mac: %pM\n",
1712                    eth_type, ethh->h_source);
1713 
1714         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1715                    eth_hlen, ethh->h_dest);
1716 
1717         iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1718 
1719         if (eth_type == ETH_P_IP) {
1720                 if (iph->protocol != IPPROTO_TCP) {
1721                         DP_NOTICE(p_hwfn,
1722                                   "Unexpected ip protocol on ll2 %x\n",
1723                                   iph->protocol);
1724                         return -EINVAL;
1725                 }
1726 
1727                 cm_info->local_ip[0] = ntohl(iph->daddr);
1728                 cm_info->remote_ip[0] = ntohl(iph->saddr);
1729                 cm_info->ip_version = QED_TCP_IPV4;
1730 
1731                 ip_hlen = (iph->ihl) * sizeof(u32);
1732                 *payload_len = ntohs(iph->tot_len) - ip_hlen;
1733         } else if (eth_type == ETH_P_IPV6) {
1734                 ip6h = (struct ipv6hdr *)iph;
1735 
1736                 if (ip6h->nexthdr != IPPROTO_TCP) {
1737                         DP_NOTICE(p_hwfn,
1738                                   "Unexpected ip protocol on ll2 %x\n",
1739                                   ip6h->nexthdr);
1740                         return -EINVAL;
1741                 }
1742 
1743                 for (i = 0; i < 4; i++) {
1744                         cm_info->local_ip[i] =
1745                             ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1746                         cm_info->remote_ip[i] =
1747                             ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1748                 }
1749                 cm_info->ip_version = QED_TCP_IPV6;
1750 
1751                 ip_hlen = sizeof(*ip6h);
1752                 *payload_len = ntohs(ip6h->payload_len);
1753         } else {
1754                 DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1755                 return -EINVAL;
1756         }
1757 
1758         tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1759 
1760         if (!tcph->syn) {
1761                 DP_NOTICE(p_hwfn,
1762                           "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1763                           iph->ihl, ntohs(tcph->source), ntohs(tcph->dest));
1764                 return -EINVAL;
1765         }
1766 
1767         cm_info->local_port = ntohs(tcph->dest);
1768         cm_info->remote_port = ntohs(tcph->source);
1769 
1770         qed_iwarp_print_cm_info(p_hwfn, cm_info);
1771 
1772         *tcp_start_offset = eth_hlen + ip_hlen;
1773 
1774         return 0;
1775 }
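
/* Worked example for a plain IPv4 SYN with no vlan tag: eth_hlen = 14,
 * ip_hlen = iph->ihl * 4 = 20 without ip options, so tcp_start_offset = 34
 * and payload_len = tot_len - 20, i.e. the tcp header plus its payload.
 */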
1776 
1777 static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
1778                                                       u16 cid)
1779 {
1780         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1781         struct qed_iwarp_fpdu *partial_fpdu;
1782         u32 idx;
1783 
1784         idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1785         if (idx >= iwarp_info->max_num_partial_fpdus) {
1786                 DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
1787                        iwarp_info->max_num_partial_fpdus);
1788                 return NULL;
1789         }
1790 
1791         partial_fpdu = &iwarp_info->partial_fpdus[idx];
1792 
1793         return partial_fpdu;
1794 }
1795 
1796 enum qed_iwarp_mpa_pkt_type {
1797         QED_IWARP_MPA_PKT_PACKED,
1798         QED_IWARP_MPA_PKT_PARTIAL,
1799         QED_IWARP_MPA_PKT_UNALIGNED
1800 };
1801 
1802 #define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
1803 #define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
1804 #define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
1805 
1806 /* Pad to multiple of 4 */
1807 #define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
1808 #define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)                              \
1809         (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +                      \
1810                                          QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
1811          QED_IWARP_MPA_CRC32_DIGEST_SIZE)
1812 
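/* Example: an mpa_len of 7 gives ALIGN(7 + 2, 4) + 4 = 16 bytes, i.e. the
 * 2-byte MPA length field and the payload padded together to a multiple of 4,
 * followed by the 4-byte CRC32 digest.
 */
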
1813 /* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
1814 #define QED_IWARP_MAX_BDS_PER_FPDU 3
1815 
1816 static const char * const pkt_type_str[] = {
1817         "QED_IWARP_MPA_PKT_PACKED",
1818         "QED_IWARP_MPA_PKT_PARTIAL",
1819         "QED_IWARP_MPA_PKT_UNALIGNED"
1820 };
1821 
1822 static int
1823 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1824                       struct qed_iwarp_fpdu *fpdu,
1825                       struct qed_iwarp_ll2_buff *buf);
1826 
1827 static enum qed_iwarp_mpa_pkt_type
1828 qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1829                        struct qed_iwarp_fpdu *fpdu,
1830                        u16 tcp_payload_len, u8 *mpa_data)
1831 {
1832         enum qed_iwarp_mpa_pkt_type pkt_type;
1833         u16 mpa_len;
1834 
1835         if (fpdu->incomplete_bytes) {
1836                 pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1837                 goto out;
1838         }
1839 
1840         /* special case of one byte remaining...
1841          * the lower byte will be read in the next packet
1842          */
1843         if (tcp_payload_len == 1) {
1844                 fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1845                 pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1846                 goto out;
1847         }
1848 
1849         mpa_len = ntohs(*((u16 *)(mpa_data)));
1850         fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1851 
1852         if (fpdu->fpdu_length <= tcp_payload_len)
1853                 pkt_type = QED_IWARP_MPA_PKT_PACKED;
1854         else
1855                 pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1856 
1857 out:
1858         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1859                    "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1860                    pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1861 
1862         return pkt_type;
1863 }
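
/* Classification summary: UNALIGNED - continuing an fpdu left incomplete by a
 * previous segment; PARTIAL - a new fpdu whose full length exceeds this
 * segment's payload (including the 1-byte corner case where only half of the
 * length field arrived); PACKED - a complete fpdu starts at the current offset.
 */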
1864 
1865 static void
1866 qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1867                     struct qed_iwarp_fpdu *fpdu,
1868                     struct unaligned_opaque_data *pkt_data,
1869                     u16 tcp_payload_size, u8 placement_offset)
1870 {
1871         fpdu->mpa_buf = buf;
1872         fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1873         fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1874         fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
1875         fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
1876 
1877         if (tcp_payload_size == 1)
1878                 fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1879         else if (tcp_payload_size < fpdu->fpdu_length)
1880                 fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1881         else
1882                 fpdu->incomplete_bytes = 0;     /* complete fpdu */
1883 
1884         fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1885 }
1886 
1887 static int
1888 qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
1889                  struct qed_iwarp_fpdu *fpdu,
1890                  struct unaligned_opaque_data *pkt_data,
1891                  struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
1892 {
1893         u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
1894         int rc;
1895 
1896         /* We need to copy the data from the partial packet stored in the
1897          * fpdu to the new buf; for this we also need to move the data
1898          * currently placed in the buf. The buffer is assumed to be big
1899          * enough since fpdu_length <= mss; we use an intermediate buffer
1900          * since we may need to copy the new data to an overlapping location.
1901          */
1902         if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
1903                 DP_ERR(p_hwfn,
1904                        "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1905                        buf->buff_size, fpdu->mpa_frag_len,
1906                        tcp_payload_size, fpdu->incomplete_bytes);
1907                 return -EINVAL;
1908         }
1909 
1910         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1911                    "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
1912                    fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
1913                    (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1914                    tcp_payload_size);
1915 
1916         memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
1917         memcpy(tmp_buf + fpdu->mpa_frag_len,
1918                (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1919                tcp_payload_size);
1920 
1921         rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1922         if (rc)
1923                 return rc;
1924 
1925         /* If we managed to post the buffer, copy the data to the new buffer;
1926          * o/w this will occur in the next round when the pkt is re-processed.
1927          */
1928         memcpy((u8 *)(buf->data), tmp_buf,
1929                fpdu->mpa_frag_len + tcp_payload_size);
1930 
1931         fpdu->mpa_buf = buf;
1932         /* fpdu->pkt_hdr remains as is */
1933         /* fpdu->mpa_frag is overridden with new buf */
1934         fpdu->mpa_frag = buf->data_phys_addr;
1935         fpdu->mpa_frag_virt = buf->data;
1936         fpdu->mpa_frag_len += tcp_payload_size;
1937 
1938         fpdu->incomplete_bytes -= tcp_payload_size;
1939 
1940         DP_VERBOSE(p_hwfn,
1941                    QED_MSG_RDMA,
1942                    "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1943                    buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
1944                    fpdu->incomplete_bytes);
1945 
1946         return 0;
1947 }
1948 
1949 static void
1950 qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1951                              struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1952 {
1953         u16 mpa_len;
1954 
1955         /* Update incomplete packets if needed */
1956         if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1957                 /* Missing lower byte is now available */
1958                 mpa_len = fpdu->fpdu_length | *mpa_data;
1959                 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1960                 /* one byte of hdr */
1961                 fpdu->mpa_frag_len = 1;
1962                 fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1963                 DP_VERBOSE(p_hwfn,
1964                            QED_MSG_RDMA,
1965                            "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1966                            mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1967         }
1968 }
1969 
1970 #define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
1971         (GET_FIELD((_curr_pkt)->flags,     \
1972                    UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1973 
1974 /* This function is used to recycle a buffer using the ll2 drop option. It
1975  * relies on the mechanism that ensures all buffers posted to tx before this
1976  * one were completed. The buffer passed here is used as a cookie in the tx
1977  * completion function and can be reposted to the rx chain when done. The flow
1978  * that requires this is one where an FPDU splits over more than 3 tcp
1979  * segments. In this case the driver needs to re-post an rx buffer instead of
1980  * the one received, but the driver can't simply repost the buffer it copied
1981  * from, as there is a case where that buffer originally held a packed FPDU
1982  * and is partially posted to FW. The driver must ensure FW is done with it.
1983  */
1984 static int
1985 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1986                       struct qed_iwarp_fpdu *fpdu,
1987                       struct qed_iwarp_ll2_buff *buf)
1988 {
1989         struct qed_ll2_tx_pkt_info tx_pkt;
1990         u8 ll2_handle;
1991         int rc;
1992 
1993         memset(&tx_pkt, 0, sizeof(tx_pkt));
1994         tx_pkt.num_of_bds = 1;
1995         tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1996         tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1997         tx_pkt.first_frag = fpdu->pkt_hdr;
1998         tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1999         buf->piggy_buf = NULL;
2000         tx_pkt.cookie = buf;
2001 
2002         ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2003 
2004         rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2005         if (rc)
2006                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2007                            "Can't drop packet rc=%d\n", rc);
2008 
2009         DP_VERBOSE(p_hwfn,
2010                    QED_MSG_RDMA,
2011                    "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
2012                    (unsigned long int)tx_pkt.first_frag,
2013                    tx_pkt.first_frag_len, buf, rc);
2014 
2015         return rc;
2016 }
2017 
2018 static int
2019 qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
2020 {
2021         struct qed_ll2_tx_pkt_info tx_pkt;
2022         u8 ll2_handle;
2023         int rc;
2024 
2025         memset(&tx_pkt, 0, sizeof(tx_pkt));
2026         tx_pkt.num_of_bds = 1;
2027         tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2028         tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2029 
2030         tx_pkt.first_frag = fpdu->pkt_hdr;
2031         tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2032         tx_pkt.enable_ip_cksum = true;
2033         tx_pkt.enable_l4_cksum = true;
2034         tx_pkt.calc_ip_len = true;
2035         /* vlan overload with enum iwarp_ll2_tx_queues */
2036         tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2037 
2038         ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2039 
2040         rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2041         if (rc)
2042                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2043                            "Can't send right edge rc=%d\n", rc);
2044         DP_VERBOSE(p_hwfn,
2045                    QED_MSG_RDMA,
2046                    "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2047                    tx_pkt.num_of_bds,
2048                    (unsigned long int)tx_pkt.first_frag,
2049                    tx_pkt.first_frag_len, rc);
2050 
2051         return rc;
2052 }
2053 
2054 static int
2055 qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
2056                     struct qed_iwarp_fpdu *fpdu,
2057                     struct unaligned_opaque_data *curr_pkt,
2058                     struct qed_iwarp_ll2_buff *buf,
2059                     u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
2060 {
2061         struct qed_ll2_tx_pkt_info tx_pkt;
2062         u8 ll2_handle;
2063         int rc;
2064 
2065         memset(&tx_pkt, 0, sizeof(tx_pkt));
2066 
2067         /* An unaligned packet means it's split over two tcp segments. So the
2068          * complete packet requires 3 bds, one for the header, one for the
2069          * part of the fpdu of the first tcp segment, and the last fragment
2070          * will point to the remainder of the fpdu. A packed pdu requires only
2071          * two bds, one for the header and one for the data.
2072          */
2073         tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2074         tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2075         tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
2076 
2077         /* Send the mpa_buf only with the last fpdu (in case of packed) */
2078         if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
2079             tcp_payload_size <= fpdu->fpdu_length)
2080                 tx_pkt.cookie = fpdu->mpa_buf;
2081 
2082         tx_pkt.first_frag = fpdu->pkt_hdr;
2083         tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2084         tx_pkt.enable_ip_cksum = true;
2085         tx_pkt.enable_l4_cksum = true;
2086         tx_pkt.calc_ip_len = true;
2087         /* vlan overload with enum iwarp_ll2_tx_queues */
2088         tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2089 
2090         /* special case of unaligned packet and not packed, need to send
2091          * both buffers as cookie to release.
2092          */
2093         if (tcp_payload_size == fpdu->incomplete_bytes)
2094                 fpdu->mpa_buf->piggy_buf = buf;
2095 
2096         ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2097 
2098         /* Set first fragment to header */
2099         rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2100         if (rc)
2101                 goto out;
2102 
2103         /* Set second fragment to first part of packet */
2104         rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2105                                                fpdu->mpa_frag,
2106                                                fpdu->mpa_frag_len);
2107         if (rc)
2108                 goto out;
2109 
2110         if (!fpdu->incomplete_bytes)
2111                 goto out;
2112 
2113         /* Set third fragment to second part of the packet */
2114         rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2115                                                ll2_handle,
2116                                                buf->data_phys_addr +
2117                                                curr_pkt->first_mpa_offset,
2118                                                fpdu->incomplete_bytes);
2119 out:
2120         DP_VERBOSE(p_hwfn,
2121                    QED_MSG_RDMA,
2122                    "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
2123                    tx_pkt.num_of_bds,
2124                    tx_pkt.first_frag_len,
2125                    fpdu->mpa_frag_len,
2126                    fpdu->incomplete_bytes, rc);
2127 
2128         return rc;
2129 }
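
/* BD layout produced above for the unaligned case (3 bds):
 *   bd0: fpdu->pkt_hdr                    - eth/ip/tcp headers
 *   bd1: fpdu->mpa_frag                   - fpdu start from earlier segment(s)
 *   bd2: buf + curr_pkt->first_mpa_offset - remaining fpdu->incomplete_bytes
 * A packed fpdu uses only bd0 and bd1.
 */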
2130 
2131 static void
2132 qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
2133                        struct unaligned_opaque_data *curr_pkt,
2134                        u32 opaque_data0, u32 opaque_data1)
2135 {
2136         u64 opaque_data;
2137 
2138         opaque_data = HILO_64(opaque_data1, opaque_data0);
2139         *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2140 
2141         curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
2142                                      le16_to_cpu(curr_pkt->first_mpa_offset);
2143         curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
2144 }
2145 
2146 /* This function is called when an unaligned or incomplete MPA packet arrives;
2147  * the driver needs to align the packet, perhaps using previously received
2148  * data, and send it down to FW once it is aligned.
2149  */
2149  */
2150 static int
2151 qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
2152                           struct qed_iwarp_ll2_mpa_buf *mpa_buf)
2153 {
2154         struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2155         struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2156         enum qed_iwarp_mpa_pkt_type pkt_type;
2157         struct qed_iwarp_fpdu *fpdu;
2158         int rc = -EINVAL;
2159         u8 *mpa_data;
2160 
2161         fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
2162         if (!fpdu) { /* something corrupt with cid, post rx back */
2163                 DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2164                        curr_pkt->cid);
2165                 goto err;
2166         }
2167 
2168         do {
2169                 mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
2170 
2171                 pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
2172                                                   mpa_buf->tcp_payload_len,
2173                                                   mpa_data);
2174 
2175                 switch (pkt_type) {
2176                 case QED_IWARP_MPA_PKT_PARTIAL:
2177                         qed_iwarp_init_fpdu(buf, fpdu,
2178                                             curr_pkt,
2179                                             mpa_buf->tcp_payload_len,
2180                                             mpa_buf->placement_offset);
2181 
2182                         if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2183                                 mpa_buf->tcp_payload_len = 0;
2184                                 break;
2185                         }
2186 
2187                         rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
2188 
2189                         if (rc) {
2190                                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2191                                            "Can't send FPDU:reset rc=%d\n", rc);
2192                                 memset(fpdu, 0, sizeof(*fpdu));
2193                                 break;
2194                         }
2195 
2196                         mpa_buf->tcp_payload_len = 0;
2197                         break;
2198                 case QED_IWARP_MPA_PKT_PACKED:
2199                         qed_iwarp_init_fpdu(buf, fpdu,
2200                                             curr_pkt,
2201                                             mpa_buf->tcp_payload_len,
2202                                             mpa_buf->placement_offset);
2203 
2204                         rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2205                                                  mpa_buf->tcp_payload_len,
2206                                                  pkt_type);
2207                         if (rc) {
2208                                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2209                                            "Can't send FPDU:reset rc=%d\n", rc);
2210                                 memset(fpdu, 0, sizeof(*fpdu));
2211                                 break;
2212                         }
2213 
2214                         mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2215                         curr_pkt->first_mpa_offset += fpdu->fpdu_length;
2216                         break;
2217                 case QED_IWARP_MPA_PKT_UNALIGNED:
2218                         qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2219                         if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2220                                 /* special handling of fpdu split over more
2221                                  * than 2 segments
2222                                  */
2223                                 if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2224                                         rc = qed_iwarp_win_right_edge(p_hwfn,
2225                                                                       fpdu);
2226                                         /* packet will be re-processed later */
2227                                         if (rc)
2228                                                 return rc;
2229                                 }
2230 
2231                                 rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
2232                                                       buf,
2233                                                       mpa_buf->tcp_payload_len);
2234                                 if (rc) /* packet will be re-processed later */
2235                                         return rc;
2236 
2237                                 mpa_buf->tcp_payload_len = 0;
2238                                 break;
2239                         }
2240 
2241                         rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2242                                                  mpa_buf->tcp_payload_len,
2243                                                  pkt_type);
2244                         if (rc) {
2245                                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2246                                            "Can't send FPDU:delay rc=%d\n", rc);
2247                                 /* don't reset fpdu -> we need it for next
2248                                  * classify
2249                                  */
2250                                 break;
2251                         }
2252 
2253                         mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2254                         curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
2255                         /* The framed PDU was sent - no more incomplete bytes */
2256                         fpdu->incomplete_bytes = 0;
2257                         break;
2258                 }
2259         } while (mpa_buf->tcp_payload_len && !rc);
2260 
2261         return rc;
2262 
2263 err:
2264         qed_iwarp_ll2_post_rx(p_hwfn,
2265                               buf,
2266                               p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2267         return rc;
2268 }
2269 
2270 static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2271 {
2272         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2273         struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2274         int rc;
2275 
2276         while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2277                 mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2278                                            struct qed_iwarp_ll2_mpa_buf,
2279                                            list_entry);
2280 
2281                 rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2282 
2283                 /* busy means break and continue processing later, don't
2284                  * remove the buf from the pending list.
2285                  */
2286                 if (rc == -EBUSY)
2287                         break;
2288 
2289                 list_move_tail(&mpa_buf->list_entry,
2290                                &iwarp_info->mpa_buf_list);
2291 
2292                 if (rc) {       /* different error, don't continue */
2293                         DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2294                         break;
2295                 }
2296         }
2297 }
2298 
2299 static void
2300 qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2301 {
2302         struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2303         struct qed_iwarp_info *iwarp_info;
2304         struct qed_hwfn *p_hwfn = cxt;
2305 
2306         iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2307         mpa_buf = list_first_entry_or_null(&iwarp_info->mpa_buf_list,
2308                                            struct qed_iwarp_ll2_mpa_buf, list_entry);
2309         if (!mpa_buf) {
2310                 DP_ERR(p_hwfn, "No free mpa buf\n");
2311                 goto err;
2312         }
2313 
2314         list_del(&mpa_buf->list_entry);
2315         qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2316                                data->opaque_data_0, data->opaque_data_1);
2317 
2318         DP_VERBOSE(p_hwfn,
2319                    QED_MSG_RDMA,
2320                    "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2321                    data->length.packet_length, mpa_buf->data.first_mpa_offset,
2322                    mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2323                    mpa_buf->data.cid);
2324 
2325         mpa_buf->ll2_buf = data->cookie;
2326         mpa_buf->tcp_payload_len = data->length.packet_length -
2327                                    mpa_buf->data.first_mpa_offset;
2328         mpa_buf->data.first_mpa_offset += data->u.placement_offset;
2329         mpa_buf->placement_offset = data->u.placement_offset;
2330 
2331         list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2332 
2333         qed_iwarp_process_pending_pkts(p_hwfn);
2334         return;
2335 err:
2336         qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2337                               iwarp_info->ll2_mpa_handle);
2338 }
2339 
2340 static void
2341 qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2342 {
2343         struct qed_iwarp_ll2_buff *buf = data->cookie;
2344         struct qed_iwarp_listener *listener;
2345         struct qed_ll2_tx_pkt_info tx_pkt;
2346         struct qed_iwarp_cm_info cm_info;
2347         struct qed_hwfn *p_hwfn = cxt;
2348         u8 remote_mac_addr[ETH_ALEN];
2349         u8 local_mac_addr[ETH_ALEN];
2350         struct qed_iwarp_ep *ep;
2351         int tcp_start_offset;
2352         u8 ll2_syn_handle;
2353         int payload_len;
2354         u32 hdr_size;
2355         int rc;
2356 
2357         memset(&cm_info, 0, sizeof(cm_info));
2358         ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2359 
2360         /* Check if packet was received with errors... */
2361         if (data->err_flags) {
2362                 DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
2363                           data->err_flags);
2364                 goto err;
2365         }
2366 
2367         if (GET_FIELD(data->parse_flags,
2368                       PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2369             GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2370                 DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
2371                 goto err;
2372         }
2373 
2374         rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
2375                                     data->u.placement_offset, remote_mac_addr,
2376                                     local_mac_addr, &payload_len,
2377                                     &tcp_start_offset);
2378         if (rc)
2379                 goto err;
2380 
2381         /* Check if there is a listener for this 4-tuple+vlan */
2382         listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2383         if (!listener) {
2384                 DP_VERBOSE(p_hwfn,
2385                            QED_MSG_RDMA,
2386                            "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2387                            data->parse_flags, data->length.packet_length);
2388 
2389                 memset(&tx_pkt, 0, sizeof(tx_pkt));
2390                 tx_pkt.num_of_bds = 1;
2391                 tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2392                 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2393                 tx_pkt.first_frag = buf->data_phys_addr +
2394                                     data->u.placement_offset;
2395                 tx_pkt.first_frag_len = data->length.packet_length;
2396                 tx_pkt.cookie = buf;
2397 
2398                 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
2399                                                &tx_pkt, true);
2400 
2401                 if (rc) {
2402                         DP_NOTICE(p_hwfn,
2403                                   "Can't post SYN back to chip rc=%d\n", rc);
2404                         goto err;
2405                 }
2406                 return;
2407         }
2408 
2409         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
2410         /* There may be an open ep on this connection if this is a syn
2411          * retransmit... need to make sure there isn't...
2412          */
2413         if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
2414                 goto err;
2415 
2416         ep = qed_iwarp_get_free_ep(p_hwfn);
2417         if (!ep)
2418                 goto err;
2419 
2420         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2421         list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
2422         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2423 
2424         ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
2425         ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2426 
2427         memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2428 
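        /* mss = max_mtu minus fixed header sizes: 20B IPv4 + 20B TCP = 40, or
         * 40B IPv6 + 20B TCP = 60 (ip/tcp options are not accounted for here).
         */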
2429         hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
2430         ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2431         ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
2432 
2433         ep->event_cb = listener->event_cb;
2434         ep->cb_context = listener->cb_context;
2435         ep->connect_mode = TCP_CONNECT_PASSIVE;
2436 
2437         ep->syn = buf;
2438         ep->syn_ip_payload_length = (u16)payload_len;
2439         ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2440                            tcp_start_offset;
2441 
2442         rc = qed_iwarp_tcp_offload(p_hwfn, ep);
2443         if (rc) {
2444                 qed_iwarp_return_ep(p_hwfn, ep);
2445                 goto err;
2446         }
2447 
2448         return;
2449 err:
2450         qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2451 }
2452 
2453 static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
2454                                      void *cookie, dma_addr_t rx_buf_addr,
2455                                      bool b_last_packet)
2456 {
2457         struct qed_iwarp_ll2_buff *buffer = cookie;
2458         struct qed_hwfn *p_hwfn = cxt;
2459 
2460         dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2461                           buffer->data, buffer->data_phys_addr);
2462         kfree(buffer);
2463 }
2464 
2465 static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
2466                                       void *cookie, dma_addr_t first_frag_addr,
2467                                       bool b_last_fragment, bool b_last_packet)
2468 {
2469         struct qed_iwarp_ll2_buff *buffer = cookie;
2470         struct qed_iwarp_ll2_buff *piggy;
2471         struct qed_hwfn *p_hwfn = cxt;
2472 
2473         if (!buffer)            /* can happen in packed mpa unaligned... */
2474                 return;
2475 
2476         /* this was originally an rx packet, post it back */
2477         piggy = buffer->piggy_buf;
2478         if (piggy) {
2479                 buffer->piggy_buf = NULL;
2480                 qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2481         }
2482 
2483         qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2484 
2485         if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2486                 qed_iwarp_process_pending_pkts(p_hwfn);
2489 }
2490 
2491 static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
2492                                      void *cookie, dma_addr_t first_frag_addr,
2493                                      bool b_last_fragment, bool b_last_packet)
2494 {
2495         struct qed_iwarp_ll2_buff *buffer = cookie;
2496         struct qed_hwfn *p_hwfn = cxt;
2497 
2498         if (!buffer)
2499                 return;
2500 
2501         if (buffer->piggy_buf) {
2502                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2503                                   buffer->piggy_buf->buff_size,
2504                                   buffer->piggy_buf->data,
2505                                   buffer->piggy_buf->data_phys_addr);
2506 
2507                 kfree(buffer->piggy_buf);
2508         }
2509 
2510         dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2511                           buffer->data, buffer->data_phys_addr);
2512 
2513         kfree(buffer);
2514 }
2515 
2516 /* The only slowpath for iwarp ll2 is unalign flush. When this completion
2517  * is received, need to reset the FPDU.
2518  */
2519 static void
2520 qed_iwarp_ll2_slowpath(void *cxt,
2521                        u8 connection_handle,
2522                        u32 opaque_data_0, u32 opaque_data_1)
2523 {
2524         struct unaligned_opaque_data unalign_data;
2525         struct qed_hwfn *p_hwfn = cxt;
2526         struct qed_iwarp_fpdu *fpdu;
2527 
2528         qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2529                                opaque_data_0, opaque_data_1);
2530 
2531         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
2532                    unalign_data.cid);
2533 
2534         fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
2535         if (fpdu)
2536                 memset(fpdu, 0, sizeof(*fpdu));
2537 }
2538 
2539 static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
2540 {
2541         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2542         int rc = 0;
2543 
2544         if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2545                 rc = qed_ll2_terminate_connection(p_hwfn,
2546                                                   iwarp_info->ll2_syn_handle);
2547                 if (rc)
2548                         DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2549 
2550                 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2551                 iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2552         }
2553 
2554         if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2555                 rc = qed_ll2_terminate_connection(p_hwfn,
2556                                                   iwarp_info->ll2_ooo_handle);
2557                 if (rc)
2558                         DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2559 
2560                 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2561                 iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2562         }
2563 
2564         if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2565                 rc = qed_ll2_terminate_connection(p_hwfn,
2566                                                   iwarp_info->ll2_mpa_handle);
2567                 if (rc)
2568                         DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2569 
2570                 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2571                 iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2572         }
2573 
2574         qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
2575                                   p_hwfn->p_rdma_info->iwarp.mac_addr);
2576 
2577         return rc;
2578 }
2579 
2580 static int
2581 qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2582                             int num_rx_bufs, int buff_size, u8 ll2_handle)
2583 {
2584         struct qed_iwarp_ll2_buff *buffer;
2585         int rc = 0;
2586         int i;
2587 
2588         for (i = 0; i < num_rx_bufs; i++) {
2589                 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2590                 if (!buffer) {
2591                         rc = -ENOMEM;
2592                         break;
2593                 }
2594 
2595                 buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2596                                                   buff_size,
2597                                                   &buffer->data_phys_addr,
2598                                                   GFP_KERNEL);
2599                 if (!buffer->data) {
2600                         kfree(buffer);
2601                         rc = -ENOMEM;
2602                         break;
2603                 }
2604 
2605                 buffer->buff_size = buff_size;
2606                 rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2607                 if (rc)
2608                         /* buffers will be deallocated by qed_ll2 */
2609                         break;
2610         }
2611         return rc;
2612 }
2613 
2614 #define QED_IWARP_MAX_BUF_SIZE(mtu)                                  \
2615         ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
2616                 ETH_CACHE_LINE_SIZE)
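
/* Example, assuming ETH_CACHE_LINE_SIZE is 64: for an mtu of 1500 the buffer
 * size is ALIGN(1500 + 14 + 8 + 2 + 64, 64) = ALIGN(1588, 64) = 1600 bytes,
 * leaving room for two vlan tags plus cache-line-aligned placement headroom.
 */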
2617 
2618 static int
2619 qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2620                     struct qed_rdma_start_in_params *params,
2621                     u32 rcv_wnd_size)
2622 {
2623         struct qed_iwarp_info *iwarp_info;
2624         struct qed_ll2_acquire_data data;
2625         struct qed_ll2_cbs cbs;
2626         u32 buff_size;
2627         u32 n_ooo_bufs; /* u32: the product below can exceed u16 before clamping */
2628         int rc = 0;
2629         int i;
2630 
2631         iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2632         iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2633         iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2634         iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2635 
2636         iwarp_info->max_mtu = params->max_mtu;
2637 
2638         ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
2639 
2640         rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2641         if (rc)
2642                 return rc;
2643 
2644         /* Start SYN connection */
2645         cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
2646         cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
2647         cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
2648         cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
2649         cbs.slowpath_cb = NULL;
2650         cbs.cookie = p_hwfn;
2651 
2652         memset(&data, 0, sizeof(data));
2653         data.input.conn_type = QED_LL2_TYPE_IWARP;
2654         data.input.mtu = params->max_mtu;
2655         data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2656         data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2657         data.input.tx_max_bds_per_packet = 1;   /* will never be fragmented */
2658         data.input.tx_tc = PKT_LB_TC;
2659         data.input.tx_dest = QED_LL2_TX_DEST_LB;
2660         data.p_connection_handle = &iwarp_info->ll2_syn_handle;
2661         data.cbs = &cbs;
2662 
2663         rc = qed_ll2_acquire_connection(p_hwfn, &data);
2664         if (rc) {
2665                 DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
2666                 qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2667                 return rc;
2668         }
2669 
2670         rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2671         if (rc) {
2672                 DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2673                 goto err;
2674         }
2675 
2676         buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2677         rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2678                                          QED_IWARP_LL2_SYN_RX_SIZE,
2679                                          buff_size,
2680                                          iwarp_info->ll2_syn_handle);
2681         if (rc)
2682                 goto err;
2683 
2684         /* Start OOO connection */
2685         data.input.conn_type = QED_LL2_TYPE_OOO;
2686         data.input.mtu = params->max_mtu;
2687 
2688         n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
2689                      iwarp_info->max_mtu;
2690         n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
2691 
2692         data.input.rx_num_desc = n_ooo_bufs;
2693         data.input.rx_num_ooo_buffers = n_ooo_bufs;
2694 
2695         data.input.tx_max_bds_per_packet = 1;   /* will never be fragmented */
2696         data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
2697         data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
2698 
2699         rc = qed_ll2_acquire_connection(p_hwfn, &data);
2700         if (rc)
2701                 goto err;
2702 
2703         rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2704         if (rc)
2705                 goto err;
2706 
2707         /* Start Unaligned MPA connection */
2708         cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
2709         cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
2710 
2711         memset(&data, 0, sizeof(data));
2712         data.input.conn_type = QED_LL2_TYPE_IWARP;
2713         data.input.mtu = params->max_mtu;
        /* The FW requires that once a packet arrives OOO, at least two rx
         * buffers must be available on the unaligned connection to handle
         * the case where it is a partial FPDU.
         */
        data.input.rx_num_desc = n_ooo_bufs * 2;
        data.input.tx_num_desc = data.input.rx_num_desc;
        data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
        data.input.tx_tc = PKT_LB_TC;
        data.input.tx_dest = QED_LL2_TX_DEST_LB;
        data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
        data.input.secondary_queue = true;
        data.cbs = &cbs;

        rc = qed_ll2_acquire_connection(p_hwfn, &data);
        if (rc)
                goto err;

        rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
        if (rc)
                goto err;

        rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
                                         data.input.rx_num_desc,
                                         buff_size,
                                         iwarp_info->ll2_mpa_handle);
        if (rc)
                goto err;

        iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
                                            sizeof(*iwarp_info->partial_fpdus),
                                            GFP_KERNEL);
        if (!iwarp_info->partial_fpdus) {
                rc = -ENOMEM;
                goto err;
        }

        iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;

        iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
        if (!iwarp_info->mpa_intermediate_buf) {
                rc = -ENOMEM;
                goto err;
        }

        /* The mpa_bufs array holds pending RX packets received on the mpa
         * ll2 that don't have room on the tx ring and require later
         * processing. We can't fail on allocation of such a struct, so we
         * allocate enough entries to cover all rx packets.
         */
        iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
                                       sizeof(*iwarp_info->mpa_bufs),
                                       GFP_KERNEL);
        if (!iwarp_info->mpa_bufs) {
                rc = -ENOMEM;
                goto err;
        }

        INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
        INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
        for (i = 0; i < data.input.rx_num_desc; i++)
                list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
                              &iwarp_info->mpa_buf_list);
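
        /* A sketch of the intended lifecycle, as read from the comment
         * above rather than stated by it explicitly: every mpa_buf starts
         * on mpa_buf_list, the rx completion path takes one to describe an
         * incoming unaligned MPA packet, and a packet that cannot be
         * forwarded immediately is parked on mpa_buf_pending_list until
         * qed_iwarp_process_pending_pkts can return it to mpa_buf_list.
         */
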
        return rc;
err:
        qed_iwarp_ll2_stop(p_hwfn);

        return rc;
}

static struct {
        u32 two_ports;
        u32 four_ports;
} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
        {QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
        {QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
};

int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
                    struct qed_rdma_start_in_params *params)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_iwarp_info *iwarp_info;
        enum chip_ids chip_id;
        u32 rcv_wnd_size;

        iwarp_info = &p_hwfn->p_rdma_info->iwarp;

        iwarp_info->tcp_flags = QED_IWARP_TS_EN;

        chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
        rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
                qed_iwarp_rcv_wnd_size[chip_id].four_ports :
                qed_iwarp_rcv_wnd_size[chip_id].two_ports;

        /* A scale of 0 corresponds to a window of QED_IWARP_RCV_WND_SIZE_MIN */
        iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
            ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
        iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
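        /* Illustrative numbers only (the real defaults live in the
         * QED_IWARP_RCV_WND_SIZE_* constants): if rcv_wnd_size were 256KB
         * with a 64KB minimum, rcv_wnd_scale = ilog2(256K) - ilog2(64K) =
         * 18 - 16 = 2 and the stored window becomes 256K >> 2 = 64K; TCP
         * window scaling later multiplies the advertised window back up
         * by 2^rcv_wnd_scale.
         */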
        iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
        iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;

        iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;

        iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND |
                               MPA_RTR_TYPE_ZERO_WRITE |
                               MPA_RTR_TYPE_ZERO_READ;

        spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
        INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
        INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);

        qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
                                  qed_iwarp_async_event);
        qed_ooo_setup(p_hwfn);

        return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
}

int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
{
        int rc;

        qed_iwarp_free_prealloc_ep(p_hwfn);
        rc = qed_iwarp_wait_for_all_cids(p_hwfn);
        if (rc)
                return rc;

        qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);

        return qed_iwarp_ll2_stop(p_hwfn);
}

static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
                                  struct qed_iwarp_ep *ep,
                                  u8 fw_return_code)
{
        struct qed_iwarp_cm_event_params params;

        qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);

        params.event = QED_IWARP_EVENT_CLOSE;
        params.ep_context = ep;
        params.cm_info = &ep->cm_info;
        params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
                         0 : -ECONNRESET;

        /* paired with READ_ONCE in destroy_qp */
        smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        list_del(&ep->list_entry);
        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        ep->event_cb(ep->cb_context, &params);
}

static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
                                         struct qed_iwarp_ep *ep,
                                         int fw_ret_code)
{
        struct qed_iwarp_cm_event_params params;
        bool event_cb = false;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
                   ep->cid, fw_ret_code);

        switch (fw_ret_code) {
        case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
                params.status = 0;
                params.event = QED_IWARP_EVENT_DISCONNECT;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_LLP_RESET:
                params.status = -ECONNRESET;
                params.event = QED_IWARP_EVENT_DISCONNECT;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
                params.event = QED_IWARP_EVENT_RQ_EMPTY;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
                params.event = QED_IWARP_EVENT_IRQ_FULL;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
                params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
                params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
                params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
                params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
                params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
                params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
                event_cb = true;
                break;
        case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
                params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
                event_cb = true;
                break;
        default:
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Unhandled exception received...fw_ret_code=%d\n",
                           fw_ret_code);
                break;
        }

        if (event_cb) {
                params.ep_context = ep;
                params.cm_info = &ep->cm_info;
                ep->event_cb(ep->cb_context, &params);
        }
}
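
/* A minimal sketch of how an upper-layer consumer might dispatch on the
 * events delivered above. Illustrative only: example_iwarp_event_cb is a
 * hypothetical name, and the int return type is an assumption about the
 * event_cb pointer stored in the ep.
 */
#if 0   /* example, not built */
static int example_iwarp_event_cb(void *context,
                                  struct qed_iwarp_cm_event_params *params)
{
        switch (params->event) {
        case QED_IWARP_EVENT_DISCONNECT:
                /* status is 0 for an orderly LLP close and -ECONNRESET
                 * for an LLP reset, per the switch above.
                 */
                return params->status;
        case QED_IWARP_EVENT_CQ_OVERFLOW:
        case QED_IWARP_EVENT_QP_CATASTROPHIC:
                /* fatal for this QP; the consumer should tear it down */
                return -EINVAL;
        default:
                return 0;
        }
}
#endif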

static void
qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
                                   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
        struct qed_iwarp_cm_event_params params;

        memset(&params, 0, sizeof(params));
        params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
        params.ep_context = ep;
        params.cm_info = &ep->cm_info;
        /* paired with READ_ONCE in destroy_qp */
        smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

        switch (fw_return_code) {
        case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "%s(0x%x) TCP connect got invalid packet\n",
                           QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
                params.status = -ECONNRESET;
                break;
        case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "%s(0x%x) TCP Connection Reset\n",
                           QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
                params.status = -ECONNRESET;
                break;
        case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
                DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
                params.status = -EBUSY;
                break;
        case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA version not supported\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
                params.status = -ECONNRESET;
                break;
        default:
                DP_ERR(p_hwfn,
                       "%s(0x%x) Unexpected return code tcp connect: %d\n",
                       QED_IWARP_CONNECT_MODE_STRING(ep),
                       ep->tcp_cid, fw_return_code);
                params.status = -ECONNRESET;
                break;
        }

        if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
                ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
                qed_iwarp_return_ep(p_hwfn, ep);
        } else {
                ep->event_cb(ep->cb_context, &params);
                spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                list_del(&ep->list_entry);
                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        }
}

static void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
                           struct qed_iwarp_ep *ep, u8 fw_return_code)
{
        u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

        if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
                /* Done with the SYN packet, post back to ll2 rx */
                qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);

                ep->syn = NULL;

                /* If connect failed - upper layer doesn't know about it */
                if (fw_return_code == RDMA_RETURN_OK)
                        qed_iwarp_mpa_received(p_hwfn, ep);
                else
                        qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
                                                           fw_return_code);
        } else {
                if (fw_return_code == RDMA_RETURN_OK)
                        qed_iwarp_mpa_offload(p_hwfn, ep);
                else
                        qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
                                                           fw_return_code);
        }
}

static inline bool
qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        if (!ep || (ep->sig != QED_EP_SIG)) {
                DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
                return false;
        }

        return true;
}

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
                                 u8 fw_event_code, u16 echo,
                                 union event_ring_data *data,
                                 u8 fw_return_code)
{
        struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
        struct regpair *fw_handle = &data->rdma_data.async_handle;
        struct qed_iwarp_ep *ep = NULL;
        u16 srq_offset;
        u16 srq_id;
        u16 cid;

        ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
                                                       fw_handle->lo);

        switch (fw_event_code) {
        case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
                /* Async completion after TCP 3-way handshake */
                if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
                        return -EINVAL;
                DP_VERBOSE(p_hwfn,
                           QED_MSG_RDMA,
                           "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
                           ep->tcp_cid, fw_return_code);
                qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
                break;
        case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
                if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
                        return -EINVAL;
                DP_VERBOSE(p_hwfn,
                           QED_MSG_RDMA,
                           "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
                           ep->cid, fw_return_code);
                qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
                break;
        case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
                /* Async completion for Close Connection ramrod */
                if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
                        return -EINVAL;
                DP_VERBOSE(p_hwfn,
                           QED_MSG_RDMA,
                           "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
                           ep->cid, fw_return_code);
                qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
                break;
        case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
                /* Async event for active side only */
                if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
                        return -EINVAL;
                DP_VERBOSE(p_hwfn,
                           QED_MSG_RDMA,
                           "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
                           ep->cid, fw_return_code);
                qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
                break;
        case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
                if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
                        return -EINVAL;
                DP_VERBOSE(p_hwfn,
                           QED_MSG_RDMA,
                           "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
                           ep->cid, fw_return_code);
                qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
                break;
        case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
                cid = (u16)le32_to_cpu(fw_handle->lo);
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
                qed_iwarp_cid_cleaned(p_hwfn, cid);
                break;
        case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
                DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
                srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
                /* FW assigns an id that fits in a u16 */
                srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
                events.affiliated_event(events.context,
                                        QED_IWARP_EVENT_SRQ_EMPTY,
                                        &srq_id);
                break;
        case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
                DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
                srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
                /* FW assigns an id that fits in a u16 */
                srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
                events.affiliated_event(events.context,
                                        QED_IWARP_EVENT_SRQ_LIMIT,
                                        &srq_id);
                break;
        case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
                DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");

                p_hwfn->p_rdma_info->events.affiliated_event(
                        p_hwfn->p_rdma_info->events.context,
                        QED_IWARP_EVENT_CQ_OVERFLOW,
                        (void *)fw_handle);
                break;
        default:
                DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
                       fw_event_code);
                return -EINVAL;
        }
        return 0;
}
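
/* The affiliated_event callback receives an opaque pointer whose meaning
 * depends on the event: a stack u16 for the SRQ events above, the raw
 * fw_handle for CQ overflow. A consumer sketch, with a hypothetical
 * handler name and a signature assumed from the calls above:
 */
#if 0   /* example, not built */
static void example_affiliated_event(void *context, u8 e_type, void *param)
{
        if (e_type == QED_IWARP_EVENT_SRQ_EMPTY ||
            e_type == QED_IWARP_EVENT_SRQ_LIMIT) {
                u16 srq_id = *(u16 *)param;     /* copy: param points at a
                                                 * stack variable valid only
                                                 * during this call
                                                 */

                /* look up the SRQ by srq_id and notify its owner */
        }
}
#endif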

int
qed_iwarp_create_listen(void *rdma_cxt,
                        struct qed_iwarp_listen_in *iparams,
                        struct qed_iwarp_listen_out *oparams)
{
        struct qed_hwfn *p_hwfn = rdma_cxt;
        struct qed_iwarp_listener *listener;

        listener = kzalloc(sizeof(*listener), GFP_KERNEL);
        if (!listener)
                return -ENOMEM;

        listener->ip_version = iparams->ip_version;
        memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
        listener->port = iparams->port;
        listener->vlan = iparams->vlan;

        listener->event_cb = iparams->event_cb;
        listener->cb_context = iparams->cb_context;
        listener->max_backlog = iparams->max_backlog;
        oparams->handle = listener;

        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        list_add_tail(&listener->list_entry,
                      &p_hwfn->p_rdma_info->iwarp.listen_list);
        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        DP_VERBOSE(p_hwfn,
                   QED_MSG_RDMA,
                   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
                   listener->event_cb,
                   listener,
                   listener->ip_addr[0],
                   listener->ip_addr[1],
                   listener->ip_addr[2],
                   listener->ip_addr[3], listener->port, listener->vlan);

        return 0;
}
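
/* A minimal usage sketch for the listen API above. Illustrative only:
 * example_start_listening, my_event_cb and my_ctx are hypothetical
 * caller-side names, and QED_TCP_IPV4 is assumed to be the IPv4 value of
 * the ip_version enum.
 */
#if 0   /* example, not built */
static int example_start_listening(struct qed_hwfn *p_hwfn)
{
        struct qed_iwarp_listen_out out = {};
        struct qed_iwarp_listen_in in = {};
        int rc;

        in.ip_version = QED_TCP_IPV4;
        in.ip_addr[0] = 0xc0a80001;     /* 192.168.0.1, host order assumed */
        in.port = 4242;
        in.vlan = 0;
        in.event_cb = my_event_cb;
        in.cb_context = my_ctx;
        in.max_backlog = 8;

        rc = qed_iwarp_create_listen(p_hwfn, &in, &out);
        if (rc)
                return rc;

        /* ... handle incoming MPA requests via my_event_cb ... */

        return qed_iwarp_destroy_listen(p_hwfn, out.handle);
}
#endif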

int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
{
        struct qed_iwarp_listener *listener = handle;
        struct qed_hwfn *p_hwfn = rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);

        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        list_del(&listener->list_entry);
        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        kfree(listener);

        return 0;
}

int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
{
        struct qed_hwfn *p_hwfn = rdma_cxt;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        struct qed_iwarp_ep *ep;
        struct qed_rdma_qp *qp;
        int rc;

        ep = iparams->ep_context;
        if (!ep) {
                DP_ERR(p_hwfn, "EP context received in send_rtr is NULL\n");
                return -EINVAL;
        }

        qp = ep->qp;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
                   qp->icid, ep->tcp_cid);

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                return rc;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);

        return rc;
}

void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
                   struct qed_rdma_query_qp_out_params *out_params)
{
        out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}
