root/drivers/nvme/target/tcp.c


DEFINITIONS

This source file includes the following definitions.
  1. nvmet_tcp_cmd_tag
  2. nvmet_tcp_has_data_in
  3. nvmet_tcp_need_data_in
  4. nvmet_tcp_need_data_out
  5. nvmet_tcp_has_inline_data
  6. nvmet_tcp_get_cmd
  7. nvmet_tcp_put_cmd
  8. nvmet_tcp_hdgst_len
  9. nvmet_tcp_ddgst_len
  10. nvmet_tcp_hdgst
  11. nvmet_tcp_verify_hdgst
  12. nvmet_tcp_check_ddgst
  13. nvmet_tcp_unmap_pdu_iovec
  14. nvmet_tcp_map_pdu_iovec
  15. nvmet_tcp_fatal_error
  16. nvmet_tcp_map_data
  17. nvmet_tcp_ddgst
  18. nvmet_setup_c2h_data_pdu
  19. nvmet_setup_r2t_pdu
  20. nvmet_setup_response_pdu
  21. nvmet_tcp_process_resp_list
  22. nvmet_tcp_fetch_cmd
  23. nvmet_tcp_queue_response
  24. nvmet_try_send_data_pdu
  25. nvmet_try_send_data
  26. nvmet_try_send_response
  27. nvmet_try_send_r2t
  28. nvmet_try_send_ddgst
  29. nvmet_tcp_try_send_one
  30. nvmet_tcp_try_send
  31. nvmet_prepare_receive_pdu
  32. nvmet_tcp_free_crypto
  33. nvmet_tcp_alloc_crypto
  34. nvmet_tcp_handle_icreq
  35. nvmet_tcp_handle_req_failure
  36. nvmet_tcp_handle_h2c_data_pdu
  37. nvmet_tcp_done_recv_pdu
  38. nvmet_tcp_pdu_size
  39. nvmet_tcp_pdu_valid
  40. nvmet_tcp_try_recv_pdu
  41. nvmet_tcp_prep_recv_ddgst
  42. nvmet_tcp_try_recv_data
  43. nvmet_tcp_try_recv_ddgst
  44. nvmet_tcp_try_recv_one
  45. nvmet_tcp_try_recv
  46. nvmet_tcp_schedule_release_queue
  47. nvmet_tcp_io_work
  48. nvmet_tcp_alloc_cmd
  49. nvmet_tcp_free_cmd
  50. nvmet_tcp_alloc_cmds
  51. nvmet_tcp_free_cmds
  52. nvmet_tcp_restore_socket_callbacks
  53. nvmet_tcp_finish_cmd
  54. nvmet_tcp_uninit_data_in_cmds
  55. nvmet_tcp_release_queue_work
  56. nvmet_tcp_data_ready
  57. nvmet_tcp_write_space
  58. nvmet_tcp_state_change
  59. nvmet_tcp_set_queue_sock
  60. nvmet_tcp_alloc_queue
  61. nvmet_tcp_accept_work
  62. nvmet_tcp_listen_data_ready
  63. nvmet_tcp_add_port
  64. nvmet_tcp_remove_port
  65. nvmet_tcp_delete_ctrl
  66. nvmet_tcp_install_queue
  67. nvmet_tcp_disc_port_addr
  68. nvmet_tcp_init
  69. nvmet_tcp_exit

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * NVMe over Fabrics TCP target.
   4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
   5  */
   6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7 #include <linux/module.h>
   8 #include <linux/init.h>
   9 #include <linux/slab.h>
  10 #include <linux/err.h>
  11 #include <linux/nvme-tcp.h>
  12 #include <net/sock.h>
  13 #include <net/tcp.h>
  14 #include <linux/inet.h>
  15 #include <linux/llist.h>
  16 #include <crypto/hash.h>
  17 
  18 #include "nvmet.h"
  19 
  20 #define NVMET_TCP_DEF_INLINE_DATA_SIZE  (4 * PAGE_SIZE)
  21 
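      /*
       * Budgets bound how much receive and send work a single invocation of
       * nvmet_tcp_io_work() may do before yielding, so one busy queue does
       * not starve other queues sharing nvmet_tcp_wq.
       */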
  22 #define NVMET_TCP_RECV_BUDGET           8
  23 #define NVMET_TCP_SEND_BUDGET           8
  24 #define NVMET_TCP_IO_WORK_BUDGET        64
  25 
  26 enum nvmet_tcp_send_state {
  27         NVMET_TCP_SEND_DATA_PDU,
  28         NVMET_TCP_SEND_DATA,
  29         NVMET_TCP_SEND_R2T,
  30         NVMET_TCP_SEND_DDGST,
  31         NVMET_TCP_SEND_RESPONSE
  32 };
  33 
  34 enum nvmet_tcp_recv_state {
  35         NVMET_TCP_RECV_PDU,
  36         NVMET_TCP_RECV_DATA,
  37         NVMET_TCP_RECV_DDGST,
  38         NVMET_TCP_RECV_ERR,
  39 };
  40 
  41 enum {
  42         NVMET_TCP_F_INIT_FAILED = (1 << 0),
  43 };
  44 
  45 struct nvmet_tcp_cmd {
  46         struct nvmet_tcp_queue          *queue;
  47         struct nvmet_req                req;
  48 
  49         struct nvme_tcp_cmd_pdu         *cmd_pdu;
  50         struct nvme_tcp_rsp_pdu         *rsp_pdu;
  51         struct nvme_tcp_data_pdu        *data_pdu;
  52         struct nvme_tcp_r2t_pdu         *r2t_pdu;
  53 
  54         u32                             rbytes_done;
  55         u32                             wbytes_done;
  56 
  57         u32                             pdu_len;
  58         u32                             pdu_recv;
  59         int                             sg_idx;
  60         int                             nr_mapped;
  61         struct msghdr                   recv_msg;
  62         struct kvec                     *iov;
  63         u32                             flags;
  64 
  65         struct list_head                entry;
  66         struct llist_node               lentry;
  67 
  68         /* send state */
  69         u32                             offset;
  70         struct scatterlist              *cur_sg;
  71         enum nvmet_tcp_send_state       state;
  72 
  73         __le32                          exp_ddgst;
  74         __le32                          recv_ddgst;
  75 };
  76 
  77 enum nvmet_tcp_queue_state {
  78         NVMET_TCP_Q_CONNECTING,
  79         NVMET_TCP_Q_LIVE,
  80         NVMET_TCP_Q_DISCONNECTING,
  81 };
  82 
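      /*
       * Per-connection queue state. Send-side state is fed by the nvmet core
       * completing requests: resp_list is a lockless llist filled from
       * nvmet_tcp_queue_response() and drained into resp_send_list from
       * io_work context. Receive-side state tracks the PDU currently being
       * assembled in 'pdu' and the command it belongs to.
       */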
  83 struct nvmet_tcp_queue {
  84         struct socket           *sock;
  85         struct nvmet_tcp_port   *port;
  86         struct work_struct      io_work;
  87         int                     cpu;
  88         struct nvmet_cq         nvme_cq;
  89         struct nvmet_sq         nvme_sq;
  90 
  91         /* send state */
  92         struct nvmet_tcp_cmd    *cmds;
  93         unsigned int            nr_cmds;
  94         struct list_head        free_list;
  95         struct llist_head       resp_list;
  96         struct list_head        resp_send_list;
  97         int                     send_list_len;
  98         struct nvmet_tcp_cmd    *snd_cmd;
  99 
 100         /* recv state */
 101         int                     offset;
 102         int                     left;
 103         enum nvmet_tcp_recv_state rcv_state;
 104         struct nvmet_tcp_cmd    *cmd;
 105         union nvme_tcp_pdu      pdu;
 106 
 107         /* digest state */
 108         bool                    hdr_digest;
 109         bool                    data_digest;
 110         struct ahash_request    *snd_hash;
 111         struct ahash_request    *rcv_hash;
 112 
 113         spinlock_t              state_lock;
 114         enum nvmet_tcp_queue_state state;
 115 
 116         struct sockaddr_storage sockaddr;
 117         struct sockaddr_storage sockaddr_peer;
 118         struct work_struct      release_work;
 119 
 120         int                     idx;
 121         struct list_head        queue_list;
 122 
 123         struct nvmet_tcp_cmd    connect;
 124 
 125         struct page_frag_cache  pf_cache;
 126 
 127         void (*data_ready)(struct sock *);
 128         void (*state_change)(struct sock *);
 129         void (*write_space)(struct sock *);
 130 };
 131 
 132 struct nvmet_tcp_port {
 133         struct socket           *sock;
 134         struct work_struct      accept_work;
 135         struct nvmet_port       *nport;
 136         struct sockaddr_storage addr;
 137         int                     last_cpu;
 138         void (*data_ready)(struct sock *);
 139 };
 140 
 141 static DEFINE_IDA(nvmet_tcp_queue_ida);
 142 static LIST_HEAD(nvmet_tcp_queue_list);
 143 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
 144 
 145 static struct workqueue_struct *nvmet_tcp_wq;
 146 static struct nvmet_fabrics_ops nvmet_tcp_ops;
 147 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
 148 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
 149 
 150 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
 151                 struct nvmet_tcp_cmd *cmd)
 152 {
 153         return cmd - queue->cmds;
 154 }
 155 
 156 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
 157 {
 158         return nvme_is_write(cmd->req.cmd) &&
 159                 cmd->rbytes_done < cmd->req.transfer_len;
 160 }
 161 
 162 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
 163 {
 164         return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
 165 }
 166 
 167 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
 168 {
 169         return !nvme_is_write(cmd->req.cmd) &&
 170                 cmd->req.transfer_len > 0 &&
 171                 !cmd->req.cqe->status;
 172 }
 173 
 174 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
 175 {
 176         return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
 177                 !cmd->rbytes_done;
 178 }
 179 
 180 static inline struct nvmet_tcp_cmd *
 181 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
 182 {
 183         struct nvmet_tcp_cmd *cmd;
 184 
 185         cmd = list_first_entry_or_null(&queue->free_list,
 186                                 struct nvmet_tcp_cmd, entry);
 187         if (!cmd)
 188                 return NULL;
 189         list_del_init(&cmd->entry);
 190 
 191         cmd->rbytes_done = cmd->wbytes_done = 0;
 192         cmd->pdu_len = 0;
 193         cmd->pdu_recv = 0;
 194         cmd->iov = NULL;
 195         cmd->flags = 0;
 196         return cmd;
 197 }
 198 
 199 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
 200 {
 201         if (unlikely(cmd == &cmd->queue->connect))
 202                 return;
 203 
 204         list_add_tail(&cmd->entry, &cmd->queue->free_list);
 205 }
 206 
 207 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
 208 {
 209         return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 210 }
 211 
 212 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
 213 {
 214         return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 215 }
 216 
 217 static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
 218                 void *pdu, size_t len)
 219 {
 220         struct scatterlist sg;
 221 
 222         sg_init_one(&sg, pdu, len);
 223         ahash_request_set_crypt(hash, &sg, pdu + len, len);
 224         crypto_ahash_digest(hash);
 225 }
 226 
 227 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
 228         void *pdu, size_t len)
 229 {
 230         struct nvme_tcp_hdr *hdr = pdu;
 231         __le32 recv_digest;
 232         __le32 exp_digest;
 233 
 234         if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
 235                 pr_err("queue %d: header digest enabled but no header digest\n",
 236                         queue->idx);
 237                 return -EPROTO;
 238         }
 239 
 240         recv_digest = *(__le32 *)(pdu + hdr->hlen);
 241         nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
 242         exp_digest = *(__le32 *)(pdu + hdr->hlen);
 243         if (recv_digest != exp_digest) {
 244                 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
 245                         queue->idx, le32_to_cpu(recv_digest),
 246                         le32_to_cpu(exp_digest));
 247                 return -EPROTO;
 248         }
 249 
 250         return 0;
 251 }
 252 
 253 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
 254 {
 255         struct nvme_tcp_hdr *hdr = pdu;
 256         u8 digest_len = nvmet_tcp_hdgst_len(queue);
 257         u32 len;
 258 
 259         len = le32_to_cpu(hdr->plen) - hdr->hlen -
 260                 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
 261 
 262         if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
 263                 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
 264                 return -EPROTO;
 265         }
 266 
 267         return 0;
 268 }
 269 
 270 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 271 {
 272         struct scatterlist *sg;
 273         int i;
 274 
 275         sg = &cmd->req.sg[cmd->sg_idx];
 276 
 277         for (i = 0; i < cmd->nr_mapped; i++)
 278                 kunmap(sg_page(&sg[i]));
 279 }
 280 
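      /*
       * Build the kvec array used to receive PDU data directly into the
       * command's scatterlist: kmap the SG pages covering the byte range
       * [rbytes_done, rbytes_done + pdu_len) and point recv_msg's iterator
       * at them. nvmet_tcp_unmap_pdu_iovec() undoes the kmaps afterwards.
       */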
 281 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 282 {
 283         struct kvec *iov = cmd->iov;
 284         struct scatterlist *sg;
 285         u32 length, offset, sg_offset;
 286 
 287         length = cmd->pdu_len;
 288         cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
 289         offset = cmd->rbytes_done;
  290         cmd->sg_idx = offset / PAGE_SIZE;
 291         sg_offset = offset % PAGE_SIZE;
 292         sg = &cmd->req.sg[cmd->sg_idx];
 293 
 294         while (length) {
 295                 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
 296 
 297                 iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
 298                 iov->iov_len = iov_len;
 299 
  300                 length -= iov_len;
  301                 sg = sg_next(sg);
  302                 iov++;
                      sg_offset = 0;
 303         }
 304 
 305         iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
 306                 cmd->nr_mapped, cmd->pdu_len);
 307 }
 308 
 309 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
 310 {
 311         queue->rcv_state = NVMET_TCP_RECV_ERR;
 312         if (queue->nvme_sq.ctrl)
 313                 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
 314         else
 315                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
 316 }
 317 
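      /*
       * Parse the command's SGL descriptor. In-capsule data (offset SGLs) is
       * only valid for writes and must fit within the port's inline_data_size;
       * its length becomes pdu_len so the data can be received along with the
       * command capsule. The scatterlist backing the transfer is allocated
       * here, plus a kvec array if host-to-controller data is expected.
       */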
 318 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 319 {
 320         struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
 321         u32 len = le32_to_cpu(sgl->length);
 322 
 323         if (!cmd->req.data_len)
 324                 return 0;
 325 
 326         if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
 327                           NVME_SGL_FMT_OFFSET)) {
 328                 if (!nvme_is_write(cmd->req.cmd))
 329                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 330 
 331                 if (len > cmd->req.port->inline_data_size)
 332                         return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
 333                 cmd->pdu_len = len;
 334         }
 335         cmd->req.transfer_len += len;
 336 
 337         cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
 338         if (!cmd->req.sg)
 339                 return NVME_SC_INTERNAL;
 340         cmd->cur_sg = cmd->req.sg;
 341 
 342         if (nvmet_tcp_has_data_in(cmd)) {
 343                 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
 344                                 sizeof(*cmd->iov), GFP_KERNEL);
 345                 if (!cmd->iov)
 346                         goto err;
 347         }
 348 
 349         return 0;
 350 err:
 351         sgl_free(cmd->req.sg);
 352         return NVME_SC_INTERNAL;
 353 }
 354 
 355 static void nvmet_tcp_ddgst(struct ahash_request *hash,
 356                 struct nvmet_tcp_cmd *cmd)
 357 {
 358         ahash_request_set_crypt(hash, cmd->req.sg,
 359                 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
 360         crypto_ahash_digest(hash);
 361 }
 362 
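      /*
       * Prepare a C2H_DATA PDU carrying the full transfer for a read command.
       * When SQ head updates are disabled, the DATA_SUCCESS flag lets the host
       * complete the command without a separate response capsule. Data and
       * header digests are computed here if they were negotiated.
       */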
 363 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 364 {
 365         struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
 366         struct nvmet_tcp_queue *queue = cmd->queue;
 367         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 368         u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
 369 
 370         cmd->offset = 0;
 371         cmd->state = NVMET_TCP_SEND_DATA_PDU;
 372 
 373         pdu->hdr.type = nvme_tcp_c2h_data;
 374         pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
 375                                                 NVME_TCP_F_DATA_SUCCESS : 0);
 376         pdu->hdr.hlen = sizeof(*pdu);
 377         pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
 378         pdu->hdr.plen =
 379                 cpu_to_le32(pdu->hdr.hlen + hdgst +
 380                                 cmd->req.transfer_len + ddgst);
 381         pdu->command_id = cmd->req.cqe->command_id;
 382         pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
 383         pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
 384 
 385         if (queue->data_digest) {
 386                 pdu->hdr.flags |= NVME_TCP_F_DDGST;
 387                 nvmet_tcp_ddgst(queue->snd_hash, cmd);
 388         }
 389 
 390         if (cmd->queue->hdr_digest) {
 391                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
 392                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 393         }
 394 }
 395 
 396 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
 397 {
 398         struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
 399         struct nvmet_tcp_queue *queue = cmd->queue;
 400         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 401 
 402         cmd->offset = 0;
 403         cmd->state = NVMET_TCP_SEND_R2T;
 404 
 405         pdu->hdr.type = nvme_tcp_r2t;
 406         pdu->hdr.flags = 0;
 407         pdu->hdr.hlen = sizeof(*pdu);
 408         pdu->hdr.pdo = 0;
 409         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
 410 
 411         pdu->command_id = cmd->req.cmd->common.command_id;
 412         pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
 413         pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
 414         pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
 415         if (cmd->queue->hdr_digest) {
 416                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
 417                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 418         }
 419 }
 420 
 421 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
 422 {
 423         struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
 424         struct nvmet_tcp_queue *queue = cmd->queue;
 425         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 426 
 427         cmd->offset = 0;
 428         cmd->state = NVMET_TCP_SEND_RESPONSE;
 429 
 430         pdu->hdr.type = nvme_tcp_rsp;
 431         pdu->hdr.flags = 0;
 432         pdu->hdr.hlen = sizeof(*pdu);
 433         pdu->hdr.pdo = 0;
 434         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
 435         if (cmd->queue->hdr_digest) {
 436                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
 437                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 438         }
 439 }
 440 
 441 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
 442 {
 443         struct llist_node *node;
 444 
 445         node = llist_del_all(&queue->resp_list);
 446         if (!node)
 447                 return;
 448 
 449         while (node) {
 450                 struct nvmet_tcp_cmd *cmd = llist_entry(node,
 451                                         struct nvmet_tcp_cmd, lentry);
 452 
 453                 list_add(&cmd->entry, &queue->resp_send_list);
 454                 node = node->next;
 455                 queue->send_list_len++;
 456         }
 457 }
 458 
 459 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
 460 {
 461         queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
 462                                 struct nvmet_tcp_cmd, entry);
 463         if (!queue->snd_cmd) {
 464                 nvmet_tcp_process_resp_list(queue);
 465                 queue->snd_cmd =
 466                         list_first_entry_or_null(&queue->resp_send_list,
 467                                         struct nvmet_tcp_cmd, entry);
 468                 if (unlikely(!queue->snd_cmd))
 469                         return NULL;
 470         }
 471 
 472         list_del_init(&queue->snd_cmd->entry);
 473         queue->send_list_len--;
 474 
 475         if (nvmet_tcp_need_data_out(queue->snd_cmd))
 476                 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
 477         else if (nvmet_tcp_need_data_in(queue->snd_cmd))
 478                 nvmet_setup_r2t_pdu(queue->snd_cmd);
 479         else
 480                 nvmet_setup_response_pdu(queue->snd_cmd);
 481 
 482         return queue->snd_cmd;
 483 }
 484 
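      /*
       * Completion callback invoked by the nvmet core, possibly from any
       * context. The command is pushed onto the lockless resp_list and the
       * queue's io_work is kicked on its assigned CPU to transmit it.
       */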
 485 static void nvmet_tcp_queue_response(struct nvmet_req *req)
 486 {
 487         struct nvmet_tcp_cmd *cmd =
 488                 container_of(req, struct nvmet_tcp_cmd, req);
 489         struct nvmet_tcp_queue  *queue = cmd->queue;
 490 
 491         llist_add(&cmd->lentry, &queue->resp_list);
 492         queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
 493 }
 494 
 495 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 496 {
 497         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 498         int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
 499         int ret;
 500 
 501         ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
 502                         offset_in_page(cmd->data_pdu) + cmd->offset,
 503                         left, MSG_DONTWAIT | MSG_MORE);
 504         if (ret <= 0)
 505                 return ret;
 506 
 507         cmd->offset += ret;
 508         left -= ret;
 509 
 510         if (left)
 511                 return -EAGAIN;
 512 
 513         cmd->state = NVMET_TCP_SEND_DATA;
 514         cmd->offset  = 0;
 515         return 1;
 516 }
 517 
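      /*
       * Transmit the command's data pages with kernel_sendpage(). MSG_MORE is
       * set while more data, a data digest, or a response PDU will follow, or
       * while other commands are queued behind this one. With SQ head updates
       * disabled and no data digest, the command completes here without a
       * response PDU.
       */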
 518 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 519 {
 520         struct nvmet_tcp_queue *queue = cmd->queue;
 521         int ret;
 522 
 523         while (cmd->cur_sg) {
 524                 struct page *page = sg_page(cmd->cur_sg);
 525                 u32 left = cmd->cur_sg->length - cmd->offset;
 526                 int flags = MSG_DONTWAIT;
 527 
 528                 if ((!last_in_batch && cmd->queue->send_list_len) ||
 529                     cmd->wbytes_done + left < cmd->req.transfer_len ||
 530                     queue->data_digest || !queue->nvme_sq.sqhd_disabled)
 531                         flags |= MSG_MORE;
 532 
 533                 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
 534                                         left, flags);
 535                 if (ret <= 0)
 536                         return ret;
 537 
 538                 cmd->offset += ret;
 539                 cmd->wbytes_done += ret;
 540 
  541                 /* Done with this sg? */
 542                 if (cmd->offset == cmd->cur_sg->length) {
 543                         cmd->cur_sg = sg_next(cmd->cur_sg);
 544                         cmd->offset = 0;
 545                 }
 546         }
 547 
 548         if (queue->data_digest) {
 549                 cmd->state = NVMET_TCP_SEND_DDGST;
 550                 cmd->offset = 0;
 551         } else {
 552                 if (queue->nvme_sq.sqhd_disabled) {
 553                         cmd->queue->snd_cmd = NULL;
 554                         nvmet_tcp_put_cmd(cmd);
 555                 } else {
 556                         nvmet_setup_response_pdu(cmd);
 557                 }
 558         }
 559 
 560         if (queue->nvme_sq.sqhd_disabled) {
 561                 kfree(cmd->iov);
 562                 sgl_free(cmd->req.sg);
 563         }
 564 
 565         return 1;
 566 
 567 }
 568 
 569 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
 570                 bool last_in_batch)
 571 {
 572         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 573         int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
 574         int flags = MSG_DONTWAIT;
 575         int ret;
 576 
 577         if (!last_in_batch && cmd->queue->send_list_len)
 578                 flags |= MSG_MORE;
 579         else
 580                 flags |= MSG_EOR;
 581 
 582         ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
 583                 offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
 584         if (ret <= 0)
 585                 return ret;
 586         cmd->offset += ret;
 587         left -= ret;
 588 
 589         if (left)
 590                 return -EAGAIN;
 591 
 592         kfree(cmd->iov);
 593         sgl_free(cmd->req.sg);
 594         cmd->queue->snd_cmd = NULL;
 595         nvmet_tcp_put_cmd(cmd);
 596         return 1;
 597 }
 598 
 599 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 600 {
 601         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 602         int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
 603         int flags = MSG_DONTWAIT;
 604         int ret;
 605 
 606         if (!last_in_batch && cmd->queue->send_list_len)
 607                 flags |= MSG_MORE;
 608         else
 609                 flags |= MSG_EOR;
 610 
 611         ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
 612                 offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
 613         if (ret <= 0)
 614                 return ret;
 615         cmd->offset += ret;
 616         left -= ret;
 617 
 618         if (left)
 619                 return -EAGAIN;
 620 
 621         cmd->queue->snd_cmd = NULL;
 622         return 1;
 623 }
 624 
 625 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
 626 {
 627         struct nvmet_tcp_queue *queue = cmd->queue;
 628         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 629         struct kvec iov = {
  630                 .iov_base = (void *)&cmd->exp_ddgst + cmd->offset,
 631                 .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
 632         };
 633         int ret;
 634 
 635         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
 636         if (unlikely(ret <= 0))
 637                 return ret;
 638 
 639         cmd->offset += ret;
 640 
 641         if (queue->nvme_sq.sqhd_disabled) {
 642                 cmd->queue->snd_cmd = NULL;
 643                 nvmet_tcp_put_cmd(cmd);
 644         } else {
 645                 nvmet_setup_response_pdu(cmd);
 646         }
 647         return 1;
 648 }
 649 
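      /*
       * Advance the current send command through its state machine:
       * C2H data PDU -> data -> optional data digest -> response, or a single
       * R2T/response PDU. Returns 1 if progress was made, 0 if there is
       * nothing to send (or the socket returned -EAGAIN), and a negative
       * error otherwise.
       */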
 650 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
 651                 bool last_in_batch)
 652 {
 653         struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
 654         int ret = 0;
 655 
 656         if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
 657                 cmd = nvmet_tcp_fetch_cmd(queue);
 658                 if (unlikely(!cmd))
 659                         return 0;
 660         }
 661 
 662         if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
 663                 ret = nvmet_try_send_data_pdu(cmd);
 664                 if (ret <= 0)
 665                         goto done_send;
 666         }
 667 
 668         if (cmd->state == NVMET_TCP_SEND_DATA) {
 669                 ret = nvmet_try_send_data(cmd, last_in_batch);
 670                 if (ret <= 0)
 671                         goto done_send;
 672         }
 673 
 674         if (cmd->state == NVMET_TCP_SEND_DDGST) {
 675                 ret = nvmet_try_send_ddgst(cmd);
 676                 if (ret <= 0)
 677                         goto done_send;
 678         }
 679 
 680         if (cmd->state == NVMET_TCP_SEND_R2T) {
 681                 ret = nvmet_try_send_r2t(cmd, last_in_batch);
 682                 if (ret <= 0)
 683                         goto done_send;
 684         }
 685 
 686         if (cmd->state == NVMET_TCP_SEND_RESPONSE)
 687                 ret = nvmet_try_send_response(cmd, last_in_batch);
 688 
 689 done_send:
 690         if (ret < 0) {
 691                 if (ret == -EAGAIN)
 692                         return 0;
 693                 return ret;
 694         }
 695 
 696         return 1;
 697 }
 698 
 699 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
 700                 int budget, int *sends)
 701 {
 702         int i, ret = 0;
 703 
 704         for (i = 0; i < budget; i++) {
 705                 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
 706                 if (ret <= 0)
 707                         break;
 708                 (*sends)++;
 709         }
 710 
 711         return ret;
 712 }
 713 
 714 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
 715 {
 716         queue->offset = 0;
 717         queue->left = sizeof(struct nvme_tcp_hdr);
 718         queue->cmd = NULL;
 719         queue->rcv_state = NVMET_TCP_RECV_PDU;
 720 }
 721 
 722 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
 723 {
 724         struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
 725 
 726         ahash_request_free(queue->rcv_hash);
 727         ahash_request_free(queue->snd_hash);
 728         crypto_free_ahash(tfm);
 729 }
 730 
 731 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
 732 {
 733         struct crypto_ahash *tfm;
 734 
 735         tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
 736         if (IS_ERR(tfm))
 737                 return PTR_ERR(tfm);
 738 
 739         queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
 740         if (!queue->snd_hash)
 741                 goto free_tfm;
 742         ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
 743 
 744         queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
 745         if (!queue->rcv_hash)
 746                 goto free_snd_hash;
 747         ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
 748 
 749         return 0;
 750 free_snd_hash:
 751         ahash_request_free(queue->snd_hash);
 752 free_tfm:
 753         crypto_free_ahash(tfm);
 754         return -ENOMEM;
 755 }
 756 
 757 
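      /*
       * Handle the Initialize Connection Request received while the queue is
       * still connecting: validate pfv/hpda, negotiate header and data digests
       * (allocating the crc32c ahash transforms if needed), send the ICResp
       * and move the queue to LIVE.
       */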
 758 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 759 {
 760         struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
 761         struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
 762         struct msghdr msg = {};
 763         struct kvec iov;
 764         int ret;
 765 
 766         if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
 767                 pr_err("bad nvme-tcp pdu length (%d)\n",
 768                         le32_to_cpu(icreq->hdr.plen));
  769                 nvmet_tcp_fatal_error(queue);
                      return -EPROTO;
  770         }
 771 
 772         if (icreq->pfv != NVME_TCP_PFV_1_0) {
 773                 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
 774                 return -EPROTO;
 775         }
 776 
 777         if (icreq->hpda != 0) {
 778                 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
 779                         icreq->hpda);
 780                 return -EPROTO;
 781         }
 782 
 783         queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
 784         queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
 785         if (queue->hdr_digest || queue->data_digest) {
 786                 ret = nvmet_tcp_alloc_crypto(queue);
 787                 if (ret)
 788                         return ret;
 789         }
 790 
 791         memset(icresp, 0, sizeof(*icresp));
 792         icresp->hdr.type = nvme_tcp_icresp;
 793         icresp->hdr.hlen = sizeof(*icresp);
 794         icresp->hdr.pdo = 0;
 795         icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
 796         icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
  797         icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
 798         icresp->cpda = 0;
 799         if (queue->hdr_digest)
 800                 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
 801         if (queue->data_digest)
 802                 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
 803 
 804         iov.iov_base = icresp;
 805         iov.iov_len = sizeof(*icresp);
 806         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
 807         if (ret < 0)
 808                 goto free_crypto;
 809 
 810         queue->state = NVMET_TCP_Q_LIVE;
 811         nvmet_prepare_receive_pdu(queue);
 812         return 0;
 813 free_crypto:
 814         if (queue->hdr_digest || queue->data_digest)
 815                 nvmet_tcp_free_crypto(queue);
 816         return ret;
 817 }
 818 
 819 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
 820                 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
 821 {
 822         int ret;
 823 
 824         /* recover the expected data transfer length */
 825         req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
 826 
 827         if (!nvme_is_write(cmd->req.cmd) ||
 828             req->data_len > cmd->req.port->inline_data_size) {
 829                 nvmet_prepare_receive_pdu(queue);
 830                 return;
 831         }
 832 
 833         ret = nvmet_tcp_map_data(cmd);
 834         if (unlikely(ret)) {
 835                 pr_err("queue %d: failed to map data\n", queue->idx);
 836                 nvmet_tcp_fatal_error(queue);
 837                 return;
 838         }
 839 
 840         queue->rcv_state = NVMET_TCP_RECV_DATA;
 841         nvmet_tcp_map_pdu_iovec(cmd);
 842         cmd->flags |= NVMET_TCP_F_INIT_FAILED;
 843 }
 844 
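      /*
       * An H2C_DATA PDU answers an earlier R2T. The transfer tag indexes the
       * command array directly; the PDU's data offset must match how many
       * bytes the command has already received. The iovec is remapped for the
       * new byte range and the queue switches to data receive state.
       */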
 845 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
 846 {
 847         struct nvme_tcp_data_pdu *data = &queue->pdu.data;
 848         struct nvmet_tcp_cmd *cmd;
 849 
 850         cmd = &queue->cmds[data->ttag];
 851 
 852         if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
 853                 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
 854                         data->ttag, le32_to_cpu(data->data_offset),
 855                         cmd->rbytes_done);
 856                 /* FIXME: use path and transport errors */
 857                 nvmet_req_complete(&cmd->req,
 858                         NVME_SC_INVALID_FIELD | NVME_SC_DNR);
 859                 return -EPROTO;
 860         }
 861 
 862         cmd->pdu_len = le32_to_cpu(data->data_length);
 863         cmd->pdu_recv = 0;
 864         nvmet_tcp_map_pdu_iovec(cmd);
 865         queue->cmd = cmd;
 866         queue->rcv_state = NVMET_TCP_RECV_DATA;
 867 
 868         return 0;
 869 }
 870 
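      /*
       * Dispatch a fully received PDU header. Before the queue is live only an
       * ICReq is accepted. H2C data PDUs resume an existing command; anything
       * else starts a new command: initialize the nvmet request, map its data,
       * then either receive inline data, queue an R2T, or execute immediately.
       */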
 871 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
 872 {
 873         struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
 874         struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
 875         struct nvmet_req *req;
 876         int ret;
 877 
 878         if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
 879                 if (hdr->type != nvme_tcp_icreq) {
 880                         pr_err("unexpected pdu type (%d) before icreq\n",
 881                                 hdr->type);
 882                         nvmet_tcp_fatal_error(queue);
 883                         return -EPROTO;
 884                 }
 885                 return nvmet_tcp_handle_icreq(queue);
 886         }
 887 
 888         if (hdr->type == nvme_tcp_h2c_data) {
 889                 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
 890                 if (unlikely(ret))
 891                         return ret;
 892                 return 0;
 893         }
 894 
 895         queue->cmd = nvmet_tcp_get_cmd(queue);
 896         if (unlikely(!queue->cmd)) {
 897                 /* This should never happen */
  898                 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d\n",
 899                         queue->idx, queue->nr_cmds, queue->send_list_len,
 900                         nvme_cmd->common.opcode);
 901                 nvmet_tcp_fatal_error(queue);
 902                 return -ENOMEM;
 903         }
 904 
 905         req = &queue->cmd->req;
 906         memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
 907 
 908         if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
 909                         &queue->nvme_sq, &nvmet_tcp_ops))) {
 910                 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
 911                         req->cmd, req->cmd->common.command_id,
 912                         req->cmd->common.opcode,
 913                         le32_to_cpu(req->cmd->common.dptr.sgl.length));
 914 
 915                 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
 916                 return -EAGAIN;
 917         }
 918 
 919         ret = nvmet_tcp_map_data(queue->cmd);
 920         if (unlikely(ret)) {
 921                 pr_err("queue %d: failed to map data\n", queue->idx);
 922                 if (nvmet_tcp_has_inline_data(queue->cmd))
 923                         nvmet_tcp_fatal_error(queue);
 924                 else
 925                         nvmet_req_complete(req, ret);
 926                 ret = -EAGAIN;
 927                 goto out;
 928         }
 929 
 930         if (nvmet_tcp_need_data_in(queue->cmd)) {
 931                 if (nvmet_tcp_has_inline_data(queue->cmd)) {
 932                         queue->rcv_state = NVMET_TCP_RECV_DATA;
 933                         nvmet_tcp_map_pdu_iovec(queue->cmd);
 934                         return 0;
 935                 }
 936                 /* send back R2T */
 937                 nvmet_tcp_queue_response(&queue->cmd->req);
 938                 goto out;
 939         }
 940 
 941         nvmet_req_execute(&queue->cmd->req);
 942 out:
 943         nvmet_prepare_receive_pdu(queue);
 944         return ret;
 945 }
 946 
 947 static const u8 nvme_tcp_pdu_sizes[] = {
 948         [nvme_tcp_icreq]        = sizeof(struct nvme_tcp_icreq_pdu),
 949         [nvme_tcp_cmd]          = sizeof(struct nvme_tcp_cmd_pdu),
 950         [nvme_tcp_h2c_data]     = sizeof(struct nvme_tcp_data_pdu),
 951 };
 952 
 953 static inline u8 nvmet_tcp_pdu_size(u8 type)
 954 {
 955         size_t idx = type;
 956 
 957         return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
 958                 nvme_tcp_pdu_sizes[idx]) ?
 959                         nvme_tcp_pdu_sizes[idx] : 0;
 960 }
 961 
 962 static inline bool nvmet_tcp_pdu_valid(u8 type)
 963 {
 964         switch (type) {
 965         case nvme_tcp_icreq:
 966         case nvme_tcp_cmd:
 967         case nvme_tcp_h2c_data:
 968                 /* fallthru */
 969                 return true;
 970         }
 971 
 972         return false;
 973 }
 974 
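      /*
       * Receive a PDU header in two steps: first the common nvme_tcp_hdr,
       * then, once the type and hlen are known, the remainder of the header
       * plus any header digest. Digest and PDU-type validation happen before
       * the PDU is dispatched to nvmet_tcp_done_recv_pdu().
       */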
 975 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
 976 {
 977         struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
 978         int len;
 979         struct kvec iov;
 980         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 981 
 982 recv:
 983         iov.iov_base = (void *)&queue->pdu + queue->offset;
 984         iov.iov_len = queue->left;
 985         len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
 986                         iov.iov_len, msg.msg_flags);
 987         if (unlikely(len < 0))
 988                 return len;
 989 
 990         queue->offset += len;
 991         queue->left -= len;
 992         if (queue->left)
 993                 return -EAGAIN;
 994 
 995         if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
 996                 u8 hdgst = nvmet_tcp_hdgst_len(queue);
 997 
 998                 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
 999                         pr_err("unexpected pdu type %d\n", hdr->type);
1000                         nvmet_tcp_fatal_error(queue);
1001                         return -EIO;
1002                 }
1003 
1004                 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1005                         pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1006                         return -EIO;
1007                 }
1008 
1009                 queue->left = hdr->hlen - queue->offset + hdgst;
1010                 goto recv;
1011         }
1012 
1013         if (queue->hdr_digest &&
1014             nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
1015                 nvmet_tcp_fatal_error(queue); /* fatal */
1016                 return -EPROTO;
1017         }
1018 
1019         if (queue->data_digest &&
1020             nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1021                 nvmet_tcp_fatal_error(queue); /* fatal */
1022                 return -EPROTO;
1023         }
1024 
1025         return nvmet_tcp_done_recv_pdu(queue);
1026 }
1027 
1028 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1029 {
1030         struct nvmet_tcp_queue *queue = cmd->queue;
1031 
1032         nvmet_tcp_ddgst(queue->rcv_hash, cmd);
1033         queue->offset = 0;
1034         queue->left = NVME_TCP_DIGEST_LENGTH;
1035         queue->rcv_state = NVMET_TCP_RECV_DDGST;
1036 }
1037 
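      /*
       * Drain socket data into the kvec iterator prepared by
       * nvmet_tcp_map_pdu_iovec(). Once the expected transfer is complete the
       * command is executed (or a data digest receive is set up first);
       * commands that failed initialization only absorb their inline data.
       */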
1038 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1039 {
1040         struct nvmet_tcp_cmd  *cmd = queue->cmd;
1041         int ret;
1042 
1043         while (msg_data_left(&cmd->recv_msg)) {
1044                 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1045                         cmd->recv_msg.msg_flags);
1046                 if (ret <= 0)
1047                         return ret;
1048 
1049                 cmd->pdu_recv += ret;
1050                 cmd->rbytes_done += ret;
1051         }
1052 
1053         nvmet_tcp_unmap_pdu_iovec(cmd);
1054 
1055         if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
1056             cmd->rbytes_done == cmd->req.transfer_len) {
1057                 if (queue->data_digest) {
1058                         nvmet_tcp_prep_recv_ddgst(cmd);
1059                         return 0;
1060                 }
1061                 nvmet_req_execute(&cmd->req);
1062         }
1063 
1064         nvmet_prepare_receive_pdu(queue);
1065         return 0;
1066 }
1067 
1068 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1069 {
1070         struct nvmet_tcp_cmd *cmd = queue->cmd;
1071         int ret;
1072         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1073         struct kvec iov = {
1074                 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1075                 .iov_len = queue->left
1076         };
1077 
1078         ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1079                         iov.iov_len, msg.msg_flags);
1080         if (unlikely(ret < 0))
1081                 return ret;
1082 
1083         queue->offset += ret;
1084         queue->left -= ret;
1085         if (queue->left)
1086                 return -EAGAIN;
1087 
1088         if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1089                 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1090                         queue->idx, cmd->req.cmd->common.command_id,
1091                         queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1092                         le32_to_cpu(cmd->exp_ddgst));
1093                 nvmet_tcp_finish_cmd(cmd);
1094                 nvmet_tcp_fatal_error(queue);
1095                 ret = -EPROTO;
1096                 goto out;
1097         }
1098 
1099         if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
1100             cmd->rbytes_done == cmd->req.transfer_len)
1101                 nvmet_req_execute(&cmd->req);
1102         ret = 0;
1103 out:
1104         nvmet_prepare_receive_pdu(queue);
1105         return ret;
1106 }
1107 
1108 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1109 {
1110         int result = 0;
1111 
1112         if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1113                 return 0;
1114 
1115         if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1116                 result = nvmet_tcp_try_recv_pdu(queue);
1117                 if (result != 0)
1118                         goto done_recv;
1119         }
1120 
1121         if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1122                 result = nvmet_tcp_try_recv_data(queue);
1123                 if (result != 0)
1124                         goto done_recv;
1125         }
1126 
1127         if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1128                 result = nvmet_tcp_try_recv_ddgst(queue);
1129                 if (result != 0)
1130                         goto done_recv;
1131         }
1132 
1133 done_recv:
1134         if (result < 0) {
1135                 if (result == -EAGAIN)
1136                         return 0;
1137                 return result;
1138         }
1139         return 1;
1140 }
1141 
1142 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1143                 int budget, int *recvs)
1144 {
1145         int i, ret = 0;
1146 
1147         for (i = 0; i < budget; i++) {
1148                 ret = nvmet_tcp_try_recv_one(queue);
1149                 if (ret <= 0)
1150                         break;
1151                 (*recvs)++;
1152         }
1153 
1154         return ret;
1155 }
1156 
1157 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1158 {
1159         spin_lock(&queue->state_lock);
1160         if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1161                 queue->state = NVMET_TCP_Q_DISCONNECTING;
1162                 schedule_work(&queue->release_work);
1163         }
1164         spin_unlock(&queue->state_lock);
1165 }
1166 
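      /*
       * Main per-queue work function: alternate between bounded receive and
       * send passes until neither makes progress or the overall budget is
       * spent. Socket-level errors shut the connection down; -EPIPE and
       * -ECONNRESET get a plain shutdown, anything else is treated as a
       * fatal controller error.
       */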
1167 static void nvmet_tcp_io_work(struct work_struct *w)
1168 {
1169         struct nvmet_tcp_queue *queue =
1170                 container_of(w, struct nvmet_tcp_queue, io_work);
1171         bool pending;
1172         int ret, ops = 0;
1173 
1174         do {
1175                 pending = false;
1176 
1177                 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1178                 if (ret > 0) {
1179                         pending = true;
1180                 } else if (ret < 0) {
1181                         if (ret == -EPIPE || ret == -ECONNRESET)
1182                                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1183                         else
1184                                 nvmet_tcp_fatal_error(queue);
1185                         return;
1186                 }
1187 
1188                 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1189                 if (ret > 0) {
1190                         /* transmitted message/data */
1191                         pending = true;
1192                 } else if (ret < 0) {
1193                         if (ret == -EPIPE || ret == -ECONNRESET)
1194                                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1195                         else
1196                                 nvmet_tcp_fatal_error(queue);
1197                         return;
1198                 }
1199 
1200         } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1201 
1202         /*
 1203          * We exhausted our budget, requeue ourselves
1204          */
1205         if (pending)
1206                 queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
1207 }
1208 
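      /*
       * Allocate the per-command PDU buffers (command, response, C2H data and
       * R2T) from the queue's page_frag cache, each with room for a trailing
       * header digest, and add the command to the queue's free list.
       */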
1209 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1210                 struct nvmet_tcp_cmd *c)
1211 {
1212         u8 hdgst = nvmet_tcp_hdgst_len(queue);
1213 
1214         c->queue = queue;
1215         c->req.port = queue->port->nport;
1216 
1217         c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1218                         sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1219         if (!c->cmd_pdu)
1220                 return -ENOMEM;
1221         c->req.cmd = &c->cmd_pdu->cmd;
1222 
1223         c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1224                         sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1225         if (!c->rsp_pdu)
1226                 goto out_free_cmd;
1227         c->req.cqe = &c->rsp_pdu->cqe;
1228 
1229         c->data_pdu = page_frag_alloc(&queue->pf_cache,
1230                         sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1231         if (!c->data_pdu)
1232                 goto out_free_rsp;
1233 
1234         c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1235                         sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1236         if (!c->r2t_pdu)
1237                 goto out_free_data;
1238 
1239         c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1240 
1241         list_add_tail(&c->entry, &queue->free_list);
1242 
1243         return 0;
1244 out_free_data:
1245         page_frag_free(c->data_pdu);
1246 out_free_rsp:
1247         page_frag_free(c->rsp_pdu);
1248 out_free_cmd:
1249         page_frag_free(c->cmd_pdu);
1250         return -ENOMEM;
1251 }
1252 
1253 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1254 {
1255         page_frag_free(c->r2t_pdu);
1256         page_frag_free(c->data_pdu);
1257         page_frag_free(c->rsp_pdu);
1258         page_frag_free(c->cmd_pdu);
1259 }
1260 
1261 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1262 {
1263         struct nvmet_tcp_cmd *cmds;
1264         int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1265 
1266         cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1267         if (!cmds)
1268                 goto out;
1269 
1270         for (i = 0; i < nr_cmds; i++) {
1271                 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1272                 if (ret)
1273                         goto out_free;
1274         }
1275 
1276         queue->cmds = cmds;
1277 
1278         return 0;
1279 out_free:
1280         while (--i >= 0)
1281                 nvmet_tcp_free_cmd(cmds + i);
1282         kfree(cmds);
1283 out:
1284         return ret;
1285 }
1286 
1287 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1288 {
1289         struct nvmet_tcp_cmd *cmds = queue->cmds;
1290         int i;
1291 
1292         for (i = 0; i < queue->nr_cmds; i++)
1293                 nvmet_tcp_free_cmd(cmds + i);
1294 
1295         nvmet_tcp_free_cmd(&queue->connect);
1296         kfree(cmds);
1297 }
1298 
1299 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1300 {
1301         struct socket *sock = queue->sock;
1302 
1303         write_lock_bh(&sock->sk->sk_callback_lock);
1304         sock->sk->sk_data_ready =  queue->data_ready;
1305         sock->sk->sk_state_change = queue->state_change;
1306         sock->sk->sk_write_space = queue->write_space;
1307         sock->sk->sk_user_data = NULL;
1308         write_unlock_bh(&sock->sk->sk_callback_lock);
1309 }
1310 
1311 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
1312 {
1313         nvmet_req_uninit(&cmd->req);
1314         nvmet_tcp_unmap_pdu_iovec(cmd);
1315         kfree(cmd->iov);
1316         sgl_free(cmd->req.sg);
1317 }
1318 
1319 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1320 {
1321         struct nvmet_tcp_cmd *cmd = queue->cmds;
1322         int i;
1323 
1324         for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1325                 if (nvmet_tcp_need_data_in(cmd))
1326                         nvmet_tcp_finish_cmd(cmd);
1327         }
1328 
1329         if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1330                 /* failed in connect */
1331                 nvmet_tcp_finish_cmd(&queue->connect);
1332         }
1333 }
1334 
1335 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1336 {
1337         struct nvmet_tcp_queue *queue =
1338                 container_of(w, struct nvmet_tcp_queue, release_work);
1339 
1340         mutex_lock(&nvmet_tcp_queue_mutex);
1341         list_del_init(&queue->queue_list);
1342         mutex_unlock(&nvmet_tcp_queue_mutex);
1343 
1344         nvmet_tcp_restore_socket_callbacks(queue);
1345         flush_work(&queue->io_work);
1346 
1347         nvmet_tcp_uninit_data_in_cmds(queue);
1348         nvmet_sq_destroy(&queue->nvme_sq);
1349         cancel_work_sync(&queue->io_work);
1350         sock_release(queue->sock);
1351         nvmet_tcp_free_cmds(queue);
1352         if (queue->hdr_digest || queue->data_digest)
1353                 nvmet_tcp_free_crypto(queue);
1354         ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1355 
1356         kfree(queue);
1357 }
1358 
1359 static void nvmet_tcp_data_ready(struct sock *sk)
1360 {
1361         struct nvmet_tcp_queue *queue;
1362 
1363         read_lock_bh(&sk->sk_callback_lock);
1364         queue = sk->sk_user_data;
1365         if (likely(queue))
1366                 queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
1367         read_unlock_bh(&sk->sk_callback_lock);
1368 }
1369 
1370 static void nvmet_tcp_write_space(struct sock *sk)
1371 {
1372         struct nvmet_tcp_queue *queue;
1373 
1374         read_lock_bh(&sk->sk_callback_lock);
1375         queue = sk->sk_user_data;
1376         if (unlikely(!queue))
1377                 goto out;
1378 
1379         if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1380                 queue->write_space(sk);
1381                 goto out;
1382         }
1383 
1384         if (sk_stream_is_writeable(sk)) {
1385                 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1386                 queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
1387         }
1388 out:
1389         read_unlock_bh(&sk->sk_callback_lock);
1390 }
1391 
1392 static void nvmet_tcp_state_change(struct sock *sk)
1393 {
1394         struct nvmet_tcp_queue *queue;
1395 
1396         write_lock_bh(&sk->sk_callback_lock);
1397         queue = sk->sk_user_data;
1398         if (!queue)
1399                 goto done;
1400 
1401         switch (sk->sk_state) {
1402         case TCP_FIN_WAIT1:
1403         case TCP_CLOSE_WAIT:
1404         case TCP_CLOSE:
1405                 /* FALLTHRU */
1406                 sk->sk_user_data = NULL;
1407                 nvmet_tcp_schedule_release_queue(queue);
1408                 break;
1409         default:
1410                 pr_warn("queue %d unhandled state %d\n",
1411                         queue->idx, sk->sk_state);
1412         }
1413 done:
1414         write_unlock_bh(&sk->sk_callback_lock);
1415 }
1416 
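      /*
       * Final socket setup for an accepted connection: record the local and
       * peer addresses, arrange for an abortive close via SO_LINGER(0) so no
       * stale data sits in the transmit queue, inherit the connection's TOS,
       * and install the nvmet-tcp data_ready/write_space/state_change
       * callbacks while saving the originals for restoration at teardown.
       */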
1417 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1418 {
1419         struct socket *sock = queue->sock;
1420         struct inet_sock *inet = inet_sk(sock->sk);
1421         struct linger sol = { .l_onoff = 1, .l_linger = 0 };
1422         int ret;
1423 
1424         ret = kernel_getsockname(sock,
1425                 (struct sockaddr *)&queue->sockaddr);
1426         if (ret < 0)
1427                 return ret;
1428 
1429         ret = kernel_getpeername(sock,
1430                 (struct sockaddr *)&queue->sockaddr_peer);
1431         if (ret < 0)
1432                 return ret;
1433 
1434         /*
1435          * Cleanup whatever is sitting in the TCP transmit queue on socket
1436          * close. This is done to prevent stale data from being sent should
1437          * the network connection be restored before TCP times out.
1438          */
1439         ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
1440                         (char *)&sol, sizeof(sol));
1441         if (ret)
1442                 return ret;
1443 
1444         /* Set socket type of service */
1445         if (inet->rcv_tos > 0) {
1446                 int tos = inet->rcv_tos;
1447 
1448                 ret = kernel_setsockopt(sock, SOL_IP, IP_TOS,
1449                                 (char *)&tos, sizeof(tos));
1450                 if (ret)
1451                         return ret;
1452         }
1453 
1454         write_lock_bh(&sock->sk->sk_callback_lock);
1455         sock->sk->sk_user_data = queue;
1456         queue->data_ready = sock->sk->sk_data_ready;
1457         sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1458         queue->state_change = sock->sk->sk_state_change;
1459         sock->sk->sk_state_change = nvmet_tcp_state_change;
1460         queue->write_space = sock->sk->sk_write_space;
1461         sock->sk->sk_write_space = nvmet_tcp_write_space;
1462         write_unlock_bh(&sock->sk->sk_callback_lock);
1463 
1464         return 0;
1465 }
1466 
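      /*
       * Per-connection setup run from the accept path: allocate the queue,
       * reserve an index, set up the special 'connect' command and the nvmet
       * submission queue, spread queues across online CPUs round-robin, hook
       * the socket callbacks and kick io_work for the first receive.
       */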
1467 static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1468                 struct socket *newsock)
1469 {
1470         struct nvmet_tcp_queue *queue;
1471         int ret;
1472 
1473         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1474         if (!queue)
1475                 return -ENOMEM;
1476 
1477         INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1478         INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1479         queue->sock = newsock;
1480         queue->port = port;
1481         queue->nr_cmds = 0;
1482         spin_lock_init(&queue->state_lock);
1483         queue->state = NVMET_TCP_Q_CONNECTING;
1484         INIT_LIST_HEAD(&queue->free_list);
1485         init_llist_head(&queue->resp_list);
1486         INIT_LIST_HEAD(&queue->resp_send_list);
1487 
1488         queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
1489         if (queue->idx < 0) {
1490                 ret = queue->idx;
1491                 goto out_free_queue;
1492         }
1493 
1494         ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1495         if (ret)
1496                 goto out_ida_remove;
1497 
1498         ret = nvmet_sq_init(&queue->nvme_sq);
1499         if (ret)
1500                 goto out_free_connect;
1501 
1502         port->last_cpu = cpumask_next_wrap(port->last_cpu,
1503                                 cpu_online_mask, -1, false);
1504         queue->cpu = port->last_cpu;
1505         nvmet_prepare_receive_pdu(queue);
1506 
1507         mutex_lock(&nvmet_tcp_queue_mutex);
1508         list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1509         mutex_unlock(&nvmet_tcp_queue_mutex);
1510 
1511         ret = nvmet_tcp_set_queue_sock(queue);
1512         if (ret)
1513                 goto out_destroy_sq;
1514 
1515         queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
1516 
1517         return 0;
1518 out_destroy_sq:
1519         mutex_lock(&nvmet_tcp_queue_mutex);
1520         list_del_init(&queue->queue_list);
1521         mutex_unlock(&nvmet_tcp_queue_mutex);
1522         nvmet_sq_destroy(&queue->nvme_sq);
1523 out_free_connect:
1524         nvmet_tcp_free_cmd(&queue->connect);
1525 out_ida_remove:
1526         ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1527 out_free_queue:
1528         kfree(queue);
1529         return ret;
1530 }
1531 
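/*
 * Accept new connections in a loop until kernel_accept() returns -EAGAIN,
 * allocating a queue for each accepted socket and releasing the socket if
 * queue setup fails.
 */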
1532 static void nvmet_tcp_accept_work(struct work_struct *w)
1533 {
1534         struct nvmet_tcp_port *port =
1535                 container_of(w, struct nvmet_tcp_port, accept_work);
1536         struct socket *newsock;
1537         int ret;
1538 
1539         while (true) {
1540                 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1541                 if (ret < 0) {
1542                         if (ret != -EAGAIN)
1543                                 pr_warn("failed to accept err=%d\n", ret);
1544                         return;
1545                 }
1546                 ret = nvmet_tcp_alloc_queue(port, newsock);
1547                 if (ret) {
1548                         pr_err("failed to allocate queue\n");
1549                         sock_release(newsock);
1550                 }
1551         }
1552 }
1553 
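/*
 * data_ready callback installed on the listening socket; schedule
 * accept_work as long as the socket is still in TCP_LISTEN state.
 */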
1554 static void nvmet_tcp_listen_data_ready(struct sock *sk)
1555 {
1556         struct nvmet_tcp_port *port;
1557 
1558         read_lock_bh(&sk->sk_callback_lock);
1559         port = sk->sk_user_data;
1560         if (!port)
1561                 goto out;
1562 
1563         if (sk->sk_state == TCP_LISTEN)
1564                 schedule_work(&port->accept_work);
1565 out:
1566         read_unlock_bh(&sk->sk_callback_lock);
1567 }
1568 
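/*
 * Bring up a listening socket for an nvmet port: resolve the configured
 * traddr/trsvcid into a sockaddr, apply the default inline data size if
 * none was configured, create the TCP socket, hook its data_ready
 * callback, set TCP_NODELAY and SO_REUSEADDR, then bind and listen with a
 * backlog of 128.
 */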
1569 static int nvmet_tcp_add_port(struct nvmet_port *nport)
1570 {
1571         struct nvmet_tcp_port *port;
1572         __kernel_sa_family_t af;
1573         int opt, ret;
1574 
1575         port = kzalloc(sizeof(*port), GFP_KERNEL);
1576         if (!port)
1577                 return -ENOMEM;
1578 
1579         switch (nport->disc_addr.adrfam) {
1580         case NVMF_ADDR_FAMILY_IP4:
1581                 af = AF_INET;
1582                 break;
1583         case NVMF_ADDR_FAMILY_IP6:
1584                 af = AF_INET6;
1585                 break;
1586         default:
1587                 pr_err("address family %d not supported\n",
1588                                 nport->disc_addr.adrfam);
1589                 ret = -EINVAL;
1590                 goto err_port;
1591         }
1592 
1593         ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1594                         nport->disc_addr.trsvcid, &port->addr);
1595         if (ret) {
1596                 pr_err("malformed ip/port passed: %s:%s\n",
1597                         nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1598                 goto err_port;
1599         }
1600 
1601         port->nport = nport;
1602         port->last_cpu = -1;
1603         INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
1604         if (port->nport->inline_data_size < 0)
1605                 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
1606 
1607         ret = sock_create(port->addr.ss_family, SOCK_STREAM,
1608                                 IPPROTO_TCP, &port->sock);
1609         if (ret) {
1610                 pr_err("failed to create a socket\n");
1611                 goto err_port;
1612         }
1613 
1614         port->sock->sk->sk_user_data = port;
1615         port->data_ready = port->sock->sk->sk_data_ready;
1616         port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
1617 
1618         opt = 1;
1619         ret = kernel_setsockopt(port->sock, IPPROTO_TCP,
1620                         TCP_NODELAY, (char *)&opt, sizeof(opt));
1621         if (ret) {
1622                 pr_err("failed to set TCP_NODELAY sock opt %d\n", ret);
1623                 goto err_sock;
1624         }
1625 
1626         ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR,
1627                         (char *)&opt, sizeof(opt));
1628         if (ret) {
1629                 pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret);
1630                 goto err_sock;
1631         }
1632 
1633         ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
1634                         sizeof(port->addr));
1635         if (ret) {
1636                 pr_err("failed to bind port socket %d\n", ret);
1637                 goto err_sock;
1638         }
1639 
1640         ret = kernel_listen(port->sock, 128);
1641         if (ret) {
1642                 pr_err("failed to listen on port sock, err=%d\n", ret);
1643                 goto err_sock;
1644         }
1645 
1646         nport->priv = port;
1647         pr_info("enabling port %d (%pISpc)\n",
1648                 le16_to_cpu(nport->disc_addr.portid), &port->addr);
1649 
1650         return 0;
1651 
1652 err_sock:
1653         sock_release(port->sock);
1654 err_port:
1655         kfree(port);
1656         return ret;
1657 }
1658 
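/*
 * Tear down the listening socket: restore the original data_ready
 * callback, wait for any pending accept_work, then release the socket.
 * Established queues are not touched here.
 */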
1659 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
1660 {
1661         struct nvmet_tcp_port *port = nport->priv;
1662 
1663         write_lock_bh(&port->sock->sk->sk_callback_lock);
1664         port->sock->sk->sk_data_ready = port->data_ready;
1665         port->sock->sk->sk_user_data = NULL;
1666         write_unlock_bh(&port->sock->sk->sk_callback_lock);
1667         cancel_work_sync(&port->accept_work);
1668 
1669         sock_release(port->sock);
1670         kfree(port);
1671 }
1672 
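/*
 * Shut down the sockets of all queues owned by the given controller; the
 * resulting socket state change then drives the normal queue release path.
 */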
1673 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
1674 {
1675         struct nvmet_tcp_queue *queue;
1676 
1677         mutex_lock(&nvmet_tcp_queue_mutex);
1678         list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1679                 if (queue->nvme_sq.ctrl == ctrl)
1680                         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1681         mutex_unlock(&nvmet_tcp_queue_mutex);
1682 }
1683 
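/*
 * Called when a queue is being installed: size the command array to twice
 * the negotiated submission queue depth and allocate it, returning
 * NVME_SC_INTERNAL on failure. For the admin queue (qid 0), let any
 * in-flight controller teardown complete first.
 */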
1684 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
1685 {
1686         struct nvmet_tcp_queue *queue =
1687                 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
1688 
1689         if (sq->qid == 0) {
1690                 /* Let inflight controller teardown complete */
1691                 flush_scheduled_work();
1692         }
1693 
1694         queue->nr_cmds = sq->size * 2;
1695         if (nvmet_tcp_alloc_cmds(queue))
1696                 return NVME_SC_INTERNAL;
1697         return 0;
1698 }
1699 
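/*
 * Report the traddr for discovery log entries. If the port listens on a
 * wildcard address, use the local address of the connection the request
 * arrived on; otherwise report the configured address verbatim.
 */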
1700 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1701                 struct nvmet_port *nport, char *traddr)
1702 {
1703         struct nvmet_tcp_port *port = nport->priv;
1704 
1705         if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
1706                 struct nvmet_tcp_cmd *cmd =
1707                         container_of(req, struct nvmet_tcp_cmd, req);
1708                 struct nvmet_tcp_queue *queue = cmd->queue;
1709 
1710                 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1711         } else {
1712                 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1713         }
1714 }
1715 
1716 static struct nvmet_fabrics_ops nvmet_tcp_ops = {
1717         .owner                  = THIS_MODULE,
1718         .type                   = NVMF_TRTYPE_TCP,
1719         .msdbd                  = 1,
1720         .has_keyed_sgls         = 0,
1721         .add_port               = nvmet_tcp_add_port,
1722         .remove_port            = nvmet_tcp_remove_port,
1723         .queue_response         = nvmet_tcp_queue_response,
1724         .delete_ctrl            = nvmet_tcp_delete_ctrl,
1725         .install_queue          = nvmet_tcp_install_queue,
1726         .disc_traddr            = nvmet_tcp_disc_port_addr,
1727 };
1728 
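/*
 * Module init: create the high-priority workqueue used to run per-queue
 * io_work and register the TCP transport with the nvmet core; the
 * workqueue is destroyed if registration fails.
 */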
1729 static int __init nvmet_tcp_init(void)
1730 {
1731         int ret;
1732 
1733         nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
1734         if (!nvmet_tcp_wq)
1735                 return -ENOMEM;
1736 
1737         ret = nvmet_register_transport(&nvmet_tcp_ops);
1738         if (ret)
1739                 goto err;
1740 
1741         return 0;
1742 err:
1743         destroy_workqueue(nvmet_tcp_wq);
1744         return ret;
1745 }
1746 
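/*
 * Module exit: unregister the transport, shut down the sockets of any
 * remaining queues, and flush scheduled work so their release completes
 * before the workqueue is destroyed.
 */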
1747 static void __exit nvmet_tcp_exit(void)
1748 {
1749         struct nvmet_tcp_queue *queue;
1750 
1751         nvmet_unregister_transport(&nvmet_tcp_ops);
1752 
1753         flush_scheduled_work();
1754         mutex_lock(&nvmet_tcp_queue_mutex);
1755         list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1756                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1757         mutex_unlock(&nvmet_tcp_queue_mutex);
1758         flush_scheduled_work();
1759 
1760         destroy_workqueue(nvmet_tcp_wq);
1761 }
1762 
1763 module_init(nvmet_tcp_init);
1764 module_exit(nvmet_tcp_exit);
1765 
1766 MODULE_LICENSE("GPL v2");
1767 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
