root/net/rds/rdma.c


DEFINITIONS

This source file includes the following definitions.
  1. rds_pages_in_vec
  2. rds_mr_tree_walk
  3. rds_destroy_mr
  4. __rds_put_mr_final
  5. rds_rdma_drop_keys
  6. rds_pin_pages
  7. __rds_rdma_map
  8. rds_get_mr
  9. rds_get_mr_for_dest
  10. rds_free_mr
  11. rds_rdma_unuse
  12. rds_rdma_free_op
  13. rds_atomic_free_op
  14. rds_rdma_pages
  15. rds_rdma_extra_size
  16. rds_cmsg_rdma_args
  17. rds_cmsg_rdma_dest
  18. rds_cmsg_rdma_map
  19. rds_cmsg_atomic

   1 /*
   2  * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
   3  *
   4  * This software is available to you under a choice of one of two
   5  * licenses.  You may choose to be licensed under the terms of the GNU
   6  * General Public License (GPL) Version 2, available from the file
   7  * COPYING in the main directory of this source tree, or the
   8  * OpenIB.org BSD license below:
   9  *
  10  *     Redistribution and use in source and binary forms, with or
  11  *     without modification, are permitted provided that the following
  12  *     conditions are met:
  13  *
  14  *      - Redistributions of source code must retain the above
  15  *        copyright notice, this list of conditions and the following
  16  *        disclaimer.
  17  *
  18  *      - Redistributions in binary form must reproduce the above
  19  *        copyright notice, this list of conditions and the following
  20  *        disclaimer in the documentation and/or other materials
  21  *        provided with the distribution.
  22  *
  23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30  * SOFTWARE.
  31  *
  32  */
  33 #include <linux/pagemap.h>
  34 #include <linux/slab.h>
  35 #include <linux/rbtree.h>
  36 #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
  37 
  38 #include "rds.h"
  39 
  40 /*
  41  * XXX
  42  *  - build with sparse
  43  *  - should we detect duplicate keys on a socket?  hmm.
  44  *  - an rdma is an mlock, apply rlimit?
  45  */
  46 
  47 /*
  48  * get the number of pages by looking at the page indices that the start and
  49  * end addresses fall in.
  50  *
  51  * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
  52  * causes the address to wrap or overflows an unsigned int.  This comes
  53  * from being stored in the 'length' member of 'struct scatterlist'.
  54  */
  55 static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
  56 {
  57         if ((vec->addr + vec->bytes <= vec->addr) ||
  58             (vec->bytes > (u64)UINT_MAX))
  59                 return 0;
  60 
  61         return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
  62                 (vec->addr >> PAGE_SHIFT);
  63 }
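       /*
        * Worked example for rds_pages_in_vec() above, with hypothetical
        * values and assuming a 4 KiB PAGE_SIZE: addr == 0x1ff0, bytes == 0x20
        * straddles a page boundary, so ((0x1ff0 + 0x20 + 0xfff) >> 12) -
        * (0x1ff0 >> 12) == 3 - 1 == 2 pages.  A vec with bytes == 0, or one
        * whose addr + bytes wraps, fails the check above and yields 0.
        */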
  64 
  65 static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
  66                                        struct rds_mr *insert)
  67 {
  68         struct rb_node **p = &root->rb_node;
  69         struct rb_node *parent = NULL;
  70         struct rds_mr *mr;
  71 
  72         while (*p) {
  73                 parent = *p;
  74                 mr = rb_entry(parent, struct rds_mr, r_rb_node);
  75 
  76                 if (key < mr->r_key)
  77                         p = &(*p)->rb_left;
  78                 else if (key > mr->r_key)
  79                         p = &(*p)->rb_right;
  80                 else
  81                         return mr;
  82         }
  83 
  84         if (insert) {
  85                 rb_link_node(&insert->r_rb_node, parent, p);
  86                 rb_insert_color(&insert->r_rb_node, root);
  87                 refcount_inc(&insert->r_refcount);
  88         }
  89         return NULL;
  90 }
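       /*
        * Note on rds_mr_tree_walk() above: with insert == NULL it is a plain
        * lookup, returning the MR registered under @key or NULL.  With insert
        * set it either returns an existing MR with the same key (leaving
        * @insert untouched) or links @insert into the tree, takes a reference
        * on it, and returns NULL.  All callers hold rs->rs_rdma_lock.
        */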
  91 
  92 /*
  93  * Destroy the transport-specific part of a MR.
  94  */
  95 static void rds_destroy_mr(struct rds_mr *mr)
  96 {
  97         struct rds_sock *rs = mr->r_sock;
  98         void *trans_private = NULL;
  99         unsigned long flags;
 100 
 101         rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
 102                         mr->r_key, refcount_read(&mr->r_refcount));
 103 
 104         if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
 105                 return;
 106 
 107         spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 108         if (!RB_EMPTY_NODE(&mr->r_rb_node))
 109                 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
 110         trans_private = mr->r_trans_private;
 111         mr->r_trans_private = NULL;
 112         spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 113 
 114         if (trans_private)
 115                 mr->r_trans->free_mr(trans_private, mr->r_invalidate);
 116 }
 117 
 118 void __rds_put_mr_final(struct rds_mr *mr)
 119 {
 120         rds_destroy_mr(mr);
 121         kfree(mr);
 122 }
 123 
 124 /*
 125  * By the time this is called we can't have any more ioctls called on
 126  * the socket so we don't need to worry about racing with others.
 127  */
 128 void rds_rdma_drop_keys(struct rds_sock *rs)
 129 {
 130         struct rds_mr *mr;
 131         struct rb_node *node;
 132         unsigned long flags;
 133 
 134         /* Release any MRs associated with this socket */
 135         spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 136         while ((node = rb_first(&rs->rs_rdma_keys))) {
 137                 mr = rb_entry(node, struct rds_mr, r_rb_node);
 138                 if (mr->r_trans == rs->rs_transport)
 139                         mr->r_invalidate = 0;
 140                 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
 141                 RB_CLEAR_NODE(&mr->r_rb_node);
 142                 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 143                 rds_destroy_mr(mr);
 144                 rds_mr_put(mr);
 145                 spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 146         }
 147         spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 148 
 149         if (rs->rs_transport && rs->rs_transport->flush_mrs)
 150                 rs->rs_transport->flush_mrs();
 151 }
 152 
 153 /*
 154  * Helper function to pin user pages.
 155  */
 156 static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
 157                         struct page **pages, int write)
 158 {
 159         int ret;
 160 
 161         ret = get_user_pages_fast(user_addr, nr_pages, write ? FOLL_WRITE : 0,
 162                                   pages);
 163 
 164         if (ret >= 0 && ret < nr_pages) {
 165                 while (ret--)
 166                         put_page(pages[ret]);
 167                 ret = -EFAULT;
 168         }
 169 
 170         return ret;
 171 }
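       /*
        * Note on rds_pin_pages() above: it returns nr_pages when the whole
        * range was pinned, a negative errno propagated from
        * get_user_pages_fast(), or -EFAULT when only part of the range could
        * be pinned, in which case the partially pinned pages are dropped
        * before returning.
        */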
 172 
 173 static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 174                           u64 *cookie_ret, struct rds_mr **mr_ret,
 175                           struct rds_conn_path *cp)
 176 {
 177         struct rds_mr *mr = NULL, *found;
 178         unsigned int nr_pages;
 179         struct page **pages = NULL;
 180         struct scatterlist *sg;
 181         void *trans_private;
 182         unsigned long flags;
 183         rds_rdma_cookie_t cookie;
 184         unsigned int nents;
 185         long i;
 186         int ret;
 187 
 188         if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
 189                 ret = -ENOTCONN; /* XXX not a great errno */
 190                 goto out;
 191         }
 192 
 193         if (!rs->rs_transport->get_mr) {
 194                 ret = -EOPNOTSUPP;
 195                 goto out;
 196         }
 197 
 198         nr_pages = rds_pages_in_vec(&args->vec);
 199         if (nr_pages == 0) {
 200                 ret = -EINVAL;
 201                 goto out;
 202         }
 203 
 204         /* Restrict the size of mr irrespective of underlying transport
 205          * To account for unaligned mr regions, subtract one from nr_pages
 206          */
 207         if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
 208                 ret = -EMSGSIZE;
 209                 goto out;
 210         }
 211 
 212         rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
 213                 args->vec.addr, args->vec.bytes, nr_pages);
 214 
 215         /* XXX clamp nr_pages to limit the size of this alloc? */
 216         pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
 217         if (!pages) {
 218                 ret = -ENOMEM;
 219                 goto out;
 220         }
 221 
 222         mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
 223         if (!mr) {
 224                 ret = -ENOMEM;
 225                 goto out;
 226         }
 227 
 228         refcount_set(&mr->r_refcount, 1);
 229         RB_CLEAR_NODE(&mr->r_rb_node);
 230         mr->r_trans = rs->rs_transport;
 231         mr->r_sock = rs;
 232 
 233         if (args->flags & RDS_RDMA_USE_ONCE)
 234                 mr->r_use_once = 1;
 235         if (args->flags & RDS_RDMA_INVALIDATE)
 236                 mr->r_invalidate = 1;
 237         if (args->flags & RDS_RDMA_READWRITE)
 238                 mr->r_write = 1;
 239 
 240         /*
 241          * Pin the pages that make up the user buffer and transfer the page
 242          * pointers to the mr's sg array.  We check to see if we've mapped
 243          * the whole region after transferring the partial page references
 244          * to the sg array so that we can have one page ref cleanup path.
 245          *
 246          * For now we have no flag that tells us whether the mapping is
 247          * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
 248          * the zero page.
 249          */
 250         ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
 251         if (ret < 0)
 252                 goto out;
 253 
 254         nents = ret;
 255         sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
 256         if (!sg) {
 257                 ret = -ENOMEM;
 258                 goto out;
 259         }
 260         WARN_ON(!nents);
 261         sg_init_table(sg, nents);
 262 
 263         /* Stick all pages into the scatterlist */
 264         for (i = 0 ; i < nents; i++)
 265                 sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
 266 
 267         rdsdebug("RDS: trans_private nents is %u\n", nents);
 268 
 269         /* Obtain a transport specific MR. If this succeeds, the
 270          * s/g list is now owned by the MR.
 271          * Note that dma_map() implies that pending writes are
 272          * flushed to RAM, so no dma_sync is needed here. */
 273         trans_private = rs->rs_transport->get_mr(sg, nents, rs,
 274                                                  &mr->r_key,
 275                                                  cp ? cp->cp_conn : NULL);
 276 
 277         if (IS_ERR(trans_private)) {
 278                 for (i = 0 ; i < nents; i++)
 279                         put_page(sg_page(&sg[i]));
 280                 kfree(sg);
 281                 ret = PTR_ERR(trans_private);
 282                 goto out;
 283         }
 284 
 285         mr->r_trans_private = trans_private;
 286 
 287         rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
 288                mr->r_key, (void *)(unsigned long) args->cookie_addr);
 289 
 290         /* The user may pass us an unaligned address, but we can only
 291          * map page aligned regions. So we keep the offset, and build
 292          * a 64bit cookie containing <R_Key, offset> and pass that
 293          * around. */
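              /*
               * Layout sketch (per the cookie helpers in rds.h, illustrative
               * values): rds_rdma_make_cookie() packs the 32-bit R_Key in the
               * low half and the intra-page offset in the high half, so a
               * hypothetical r_key of 0x1234 with an addr ending in 0xa30
               * gives a cookie of 0x00000a3000001234.  rds_rdma_cookie_key()
               * and rds_rdma_cookie_offset() recover the two halves.
               */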
 294         cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
 295         if (cookie_ret)
 296                 *cookie_ret = cookie;
 297 
 298         if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
 299                 ret = -EFAULT;
 300                 goto out;
 301         }
 302 
 303         /* Inserting the new MR into the rbtree bumps its
 304          * reference count. */
 305         spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 306         found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
 307         spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 308 
 309         BUG_ON(found && found != mr);
 310 
 311         rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
 312         if (mr_ret) {
 313                 refcount_inc(&mr->r_refcount);
 314                 *mr_ret = mr;
 315         }
 316 
 317         ret = 0;
 318 out:
 319         kfree(pages);
 320         if (mr)
 321                 rds_mr_put(mr);
 322         return ret;
 323 }
 324 
 325 int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
 326 {
 327         struct rds_get_mr_args args;
 328 
 329         if (optlen != sizeof(struct rds_get_mr_args))
 330                 return -EINVAL;
 331 
 332         if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
 333                            sizeof(struct rds_get_mr_args)))
 334                 return -EFAULT;
 335 
 336         return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
 337 }
 338 
 339 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
 340 {
 341         struct rds_get_mr_for_dest_args args;
 342         struct rds_get_mr_args new_args;
 343 
 344         if (optlen != sizeof(struct rds_get_mr_for_dest_args))
 345                 return -EINVAL;
 346 
 347         if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
 348                            sizeof(struct rds_get_mr_for_dest_args)))
 349                 return -EFAULT;
 350 
 351         /*
 352          * Initially, just behave like get_mr().
 353          * TODO: Implement get_mr as wrapper around this
 354          *       and deprecate it.
 355          */
 356         new_args.vec = args.vec;
 357         new_args.cookie_addr = args.cookie_addr;
 358         new_args.flags = args.flags;
 359 
 360         return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
 361 }
 362 
 363 /*
 364  * Free the MR indicated by the given R_Key
 365  */
 366 int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
 367 {
 368         struct rds_free_mr_args args;
 369         struct rds_mr *mr;
 370         unsigned long flags;
 371 
 372         if (optlen != sizeof(struct rds_free_mr_args))
 373                 return -EINVAL;
 374 
 375         if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
 376                            sizeof(struct rds_free_mr_args)))
 377                 return -EFAULT;
 378 
 379         /* Special case - a null cookie means flush all unused MRs */
 380         if (args.cookie == 0) {
 381                 if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
 382                         return -EINVAL;
 383                 rs->rs_transport->flush_mrs();
 384                 return 0;
 385         }
 386 
 387         /* Look up the MR given its R_key and remove it from the rbtree
 388          * so nobody else finds it.
 389          * This should also prevent races with rds_rdma_unuse.
 390          */
 391         spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 392         mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
 393         if (mr) {
 394                 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
 395                 RB_CLEAR_NODE(&mr->r_rb_node);
 396                 if (args.flags & RDS_RDMA_INVALIDATE)
 397                         mr->r_invalidate = 1;
 398         }
 399         spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 400 
 401         if (!mr)
 402                 return -EINVAL;
 403 
 404         /*
 405          * call rds_destroy_mr() ourselves so that we're sure it's done by the time
 406          * we return.  If we let rds_mr_put() do it it might not happen until
 407          * someone else drops their ref.
 408          */
 409         rds_destroy_mr(mr);
 410         rds_mr_put(mr);
 411         return 0;
 412 }
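       /*
        * Illustrative userspace sketch (not part of this file) of how the
        * setsockopt() handlers above are reached; option and flag names are
        * from the uapi <linux/rds.h>, rds_fd is assumed to be a bound PF_RDS
        * SOCK_SEQPACKET socket:
        *
        *      struct rds_get_mr_args args = {
        *              .vec         = { .addr = (u64) buf, .bytes = len },
        *              .cookie_addr = (u64) &cookie,
        *              .flags       = RDS_RDMA_USE_ONCE,
        *      };
        *      setsockopt(rds_fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
        *      ...
        *      struct rds_free_mr_args fargs = { .cookie = cookie };
        *      setsockopt(rds_fd, SOL_RDS, RDS_FREE_MR, &fargs, sizeof(fargs));
        */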
 413 
 414 /*
 415  * This is called when we receive an extension header that
 416  * tells us this MR was used. It allows us to implement
 417  * use_once semantics
 418  */
 419 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
 420 {
 421         struct rds_mr *mr;
 422         unsigned long flags;
 423         int zot_me = 0;
 424 
 425         spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 426         mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
 427         if (!mr) {
 428                 pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
 429                          r_key);
 430                 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 431                 return;
 432         }
 433 
 434         if (mr->r_use_once || force) {
 435                 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
 436                 RB_CLEAR_NODE(&mr->r_rb_node);
 437                 zot_me = 1;
 438         }
 439         spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 440 
 441         /* May have to issue a dma_sync on this memory region.
  442  * Note we could avoid this if the operation was an RDMA READ,
 443          * but at this point we can't tell. */
 444         if (mr->r_trans->sync_mr)
 445                 mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
 446 
 447         /* If the MR was marked as invalidate, this will
 448          * trigger an async flush. */
 449         if (zot_me) {
 450                 rds_destroy_mr(mr);
 451                 rds_mr_put(mr);
 452         }
 453 }
 454 
 455 void rds_rdma_free_op(struct rm_rdma_op *ro)
 456 {
 457         unsigned int i;
 458 
 459         for (i = 0; i < ro->op_nents; i++) {
 460                 struct page *page = sg_page(&ro->op_sg[i]);
 461 
 462                 /* Mark page dirty if it was possibly modified, which
  463                  * is the case for an RDMA_READ, which copies from remote
 464                  * to local memory */
 465                 if (!ro->op_write) {
 466                         WARN_ON(!page->mapping && irqs_disabled());
 467                         set_page_dirty(page);
 468                 }
 469                 put_page(page);
 470         }
 471 
 472         kfree(ro->op_notifier);
 473         ro->op_notifier = NULL;
 474         ro->op_active = 0;
 475 }
 476 
 477 void rds_atomic_free_op(struct rm_atomic_op *ao)
 478 {
 479         struct page *page = sg_page(ao->op_sg);
 480 
 481         /* Mark page dirty if it was possibly modified, which
  482          * is the case for an RDMA_READ, which copies from remote
 483          * to local memory */
 484         set_page_dirty(page);
 485         put_page(page);
 486 
 487         kfree(ao->op_notifier);
 488         ao->op_notifier = NULL;
 489         ao->op_active = 0;
 490 }
 491 
 492 
 493 /*
 494  * Count the number of pages needed to describe an incoming iovec array.
 495  */
 496 static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
 497 {
 498         int tot_pages = 0;
 499         unsigned int nr_pages;
 500         unsigned int i;
 501 
 502         /* figure out the number of pages in the vector */
 503         for (i = 0; i < nr_iovecs; i++) {
 504                 nr_pages = rds_pages_in_vec(&iov[i]);
 505                 if (nr_pages == 0)
 506                         return -EINVAL;
 507 
 508                 tot_pages += nr_pages;
 509 
 510                 /*
 511                  * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
 512                  * so tot_pages cannot overflow without first going negative.
 513                  */
 514                 if (tot_pages < 0)
 515                         return -EINVAL;
 516         }
 517 
 518         return tot_pages;
 519 }
 520 
 521 int rds_rdma_extra_size(struct rds_rdma_args *args,
 522                         struct rds_iov_vector *iov)
 523 {
 524         struct rds_iovec *vec;
 525         struct rds_iovec __user *local_vec;
 526         int tot_pages = 0;
 527         unsigned int nr_pages;
 528         unsigned int i;
 529 
 530         local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
 531 
 532         if (args->nr_local == 0)
 533                 return -EINVAL;
 534 
 535         iov->iov = kcalloc(args->nr_local,
 536                            sizeof(struct rds_iovec),
 537                            GFP_KERNEL);
 538         if (!iov->iov)
 539                 return -ENOMEM;
 540 
 541         vec = &iov->iov[0];
 542 
 543         if (copy_from_user(vec, local_vec, args->nr_local *
 544                            sizeof(struct rds_iovec)))
 545                 return -EFAULT;
 546         iov->len = args->nr_local;
 547 
 548         /* figure out the number of pages in the vector */
 549         for (i = 0; i < args->nr_local; i++, vec++) {
 550 
 551                 nr_pages = rds_pages_in_vec(vec);
 552                 if (nr_pages == 0)
 553                         return -EINVAL;
 554 
 555                 tot_pages += nr_pages;
 556 
 557                 /*
 558                  * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
 559                  * so tot_pages cannot overflow without first going negative.
 560                  */
 561                 if (tot_pages < 0)
 562                         return -EINVAL;
 563         }
 564 
 565         return tot_pages * sizeof(struct scatterlist);
 566 }
 567 
 568 /*
  569  * The application asks for an RDMA transfer.
 570  * Extract all arguments and set up the rdma_op
 571  */
 572 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 573                        struct cmsghdr *cmsg,
 574                        struct rds_iov_vector *vec)
 575 {
 576         struct rds_rdma_args *args;
 577         struct rm_rdma_op *op = &rm->rdma;
 578         int nr_pages;
 579         unsigned int nr_bytes;
 580         struct page **pages = NULL;
 581         struct rds_iovec *iovs;
 582         unsigned int i, j;
 583         int ret = 0;
 584 
 585         if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
 586             || rm->rdma.op_active)
 587                 return -EINVAL;
 588 
 589         args = CMSG_DATA(cmsg);
 590 
 591         if (ipv6_addr_any(&rs->rs_bound_addr)) {
 592                 ret = -ENOTCONN; /* XXX not a great errno */
 593                 goto out_ret;
 594         }
 595 
 596         if (args->nr_local > UIO_MAXIOV) {
 597                 ret = -EMSGSIZE;
 598                 goto out_ret;
 599         }
 600 
 601         if (vec->len != args->nr_local) {
 602                 ret = -EINVAL;
 603                 goto out_ret;
 604         }
 605 
 606         iovs = vec->iov;
 607 
 608         nr_pages = rds_rdma_pages(iovs, args->nr_local);
 609         if (nr_pages < 0) {
 610                 ret = -EINVAL;
 611                 goto out_ret;
 612         }
 613 
 614         pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
 615         if (!pages) {
 616                 ret = -ENOMEM;
 617                 goto out_ret;
 618         }
 619 
 620         op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
 621         op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
 622         op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
 623         op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
 624         op->op_active = 1;
 625         op->op_recverr = rs->rs_recverr;
 626         WARN_ON(!nr_pages);
 627         op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
 628         if (IS_ERR(op->op_sg)) {
 629                 ret = PTR_ERR(op->op_sg);
 630                 goto out_pages;
 631         }
 632 
 633         if (op->op_notify || op->op_recverr) {
 634                 /* We allocate an uninitialized notifier here, because
 635                  * we don't want to do that in the completion handler. We
 636                  * would have to use GFP_ATOMIC there, and don't want to deal
 637                  * with failed allocations.
 638                  */
 639                 op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
 640                 if (!op->op_notifier) {
 641                         ret = -ENOMEM;
 642                         goto out_pages;
 643                 }
 644                 op->op_notifier->n_user_token = args->user_token;
 645                 op->op_notifier->n_status = RDS_RDMA_SUCCESS;
 646         }
 647 
 648         /* The cookie contains the R_Key of the remote memory region, and
 649          * optionally an offset into it. This is how we implement RDMA into
 650          * unaligned memory.
 651          * When setting up the RDMA, we need to add that offset to the
 652          * destination address (which is really an offset into the MR)
 653          * FIXME: We may want to move this into ib_rdma.c
 654          */
 655         op->op_rkey = rds_rdma_cookie_key(args->cookie);
 656         op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
 657 
 658         nr_bytes = 0;
 659 
 660         rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
 661                (unsigned long long)args->nr_local,
 662                (unsigned long long)args->remote_vec.addr,
 663                op->op_rkey);
 664 
 665         for (i = 0; i < args->nr_local; i++) {
 666                 struct rds_iovec *iov = &iovs[i];
  667                 /* don't need to check, rds_rdma_pages() verified nr will be nonzero */
 668                 unsigned int nr = rds_pages_in_vec(iov);
 669 
 670                 rs->rs_user_addr = iov->addr;
 671                 rs->rs_user_bytes = iov->bytes;
 672 
 673                 /* If it's a WRITE operation, we want to pin the pages for reading.
 674                  * If it's a READ operation, we need to pin the pages for writing.
 675                  */
 676                 ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
 677                 if (ret < 0)
 678                         goto out_pages;
 679                 else
 680                         ret = 0;
 681 
 682                 rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
 683                          nr_bytes, nr, iov->bytes, iov->addr);
 684 
 685                 nr_bytes += iov->bytes;
 686 
 687                 for (j = 0; j < nr; j++) {
 688                         unsigned int offset = iov->addr & ~PAGE_MASK;
 689                         struct scatterlist *sg;
 690 
 691                         sg = &op->op_sg[op->op_nents + j];
 692                         sg_set_page(sg, pages[j],
 693                                         min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
 694                                         offset);
 695 
 696                         rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
 697                                sg->offset, sg->length, iov->addr, iov->bytes);
 698 
 699                         iov->addr += sg->length;
 700                         iov->bytes -= sg->length;
 701                 }
 702 
 703                 op->op_nents += nr;
 704         }
 705 
 706         if (nr_bytes > args->remote_vec.bytes) {
 707                 rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
 708                                 nr_bytes,
 709                                 (unsigned int) args->remote_vec.bytes);
 710                 ret = -EINVAL;
 711                 goto out_pages;
 712         }
 713         op->op_bytes = nr_bytes;
 714 
 715 out_pages:
 716         kfree(pages);
 717 out_ret:
 718         if (ret)
 719                 rds_rdma_free_op(op);
 720         else
 721                 rds_stats_inc(s_send_rdma);
 722 
 723         return ret;
 724 }
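       /*
        * Illustrative sketch (not part of this file): rds_cmsg_rdma_args() is
        * driven by a sendmsg() control message of level SOL_RDS and type
        * RDS_CMSG_RDMA_ARGS carrying a struct rds_rdma_args, roughly:
        *
        *      struct rds_rdma_args rargs = {
        *              .cookie         = remote_cookie,    // obtained from the peer
        *              .remote_vec     = { .addr = 0, .bytes = len },
        *              .local_vec_addr = (u64) iovs,       // struct rds_iovec array
        *              .nr_local       = n,
        *              .flags          = RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
        *              .user_token     = token,
        *      };
        *
        * copied into CMSG_DATA() of the cmsg and sent alongside the payload.
        */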
 725 
 726 /*
 727  * The application wants us to pass an RDMA destination (aka MR)
 728  * to the remote
 729  */
 730 int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 731                           struct cmsghdr *cmsg)
 732 {
 733         unsigned long flags;
 734         struct rds_mr *mr;
 735         u32 r_key;
 736         int err = 0;
 737 
 738         if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
 739             rm->m_rdma_cookie != 0)
 740                 return -EINVAL;
 741 
 742         memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
 743 
 744         /* We are reusing a previously mapped MR here. Most likely, the
 745          * application has written to the buffer, so we need to explicitly
 746          * flush those writes to RAM. Otherwise the HCA may not see them
 747          * when doing a DMA from that buffer.
 748          */
 749         r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);
 750 
 751         spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 752         mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
 753         if (!mr)
 754                 err = -EINVAL;  /* invalid r_key */
 755         else
 756                 refcount_inc(&mr->r_refcount);
 757         spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 758 
 759         if (mr) {
 760                 mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
 761                 rm->rdma.op_rdma_mr = mr;
 762         }
 763         return err;
 764 }
 765 
 766 /*
 767  * The application passes us an address range it wants to enable RDMA
 768  * to/from. We map the area, and save the <R_Key,offset> pair
 769  * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 770  * in an extension header.
 771  */
 772 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 773                           struct cmsghdr *cmsg)
 774 {
 775         if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
 776             rm->m_rdma_cookie != 0)
 777                 return -EINVAL;
 778 
 779         return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
 780                               &rm->rdma.op_rdma_mr, rm->m_conn_path);
 781 }
 782 
 783 /*
 784  * Fill in rds_message for an atomic request.
 785  */
 786 int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
 787                     struct cmsghdr *cmsg)
 788 {
 789         struct page *page = NULL;
 790         struct rds_atomic_args *args;
 791         int ret = 0;
 792 
 793         if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
 794          || rm->atomic.op_active)
 795                 return -EINVAL;
 796 
 797         args = CMSG_DATA(cmsg);
 798 
 799         /* Nonmasked & masked cmsg ops converted to masked hw ops */
 800         switch (cmsg->cmsg_type) {
 801         case RDS_CMSG_ATOMIC_FADD:
 802                 rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
 803                 rm->atomic.op_m_fadd.add = args->fadd.add;
 804                 rm->atomic.op_m_fadd.nocarry_mask = 0;
 805                 break;
 806         case RDS_CMSG_MASKED_ATOMIC_FADD:
 807                 rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
 808                 rm->atomic.op_m_fadd.add = args->m_fadd.add;
 809                 rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
 810                 break;
 811         case RDS_CMSG_ATOMIC_CSWP:
 812                 rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
 813                 rm->atomic.op_m_cswp.compare = args->cswp.compare;
 814                 rm->atomic.op_m_cswp.swap = args->cswp.swap;
 815                 rm->atomic.op_m_cswp.compare_mask = ~0;
 816                 rm->atomic.op_m_cswp.swap_mask = ~0;
 817                 break;
 818         case RDS_CMSG_MASKED_ATOMIC_CSWP:
 819                 rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
 820                 rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
 821                 rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
 822                 rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
 823                 rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
 824                 break;
 825         default:
 826                 BUG(); /* should never happen */
 827         }
 828 
 829         rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
 830         rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
 831         rm->atomic.op_active = 1;
 832         rm->atomic.op_recverr = rs->rs_recverr;
 833         rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
 834         if (IS_ERR(rm->atomic.op_sg)) {
 835                 ret = PTR_ERR(rm->atomic.op_sg);
 836                 goto err;
 837         }
 838 
  839         /* verify the local address is 8-byte aligned */
 840         if (args->local_addr & 0x7) {
 841                 ret = -EFAULT;
 842                 goto err;
 843         }
 844 
 845         ret = rds_pin_pages(args->local_addr, 1, &page, 1);
 846         if (ret != 1)
 847                 goto err;
 848         ret = 0;
 849 
 850         sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
 851 
 852         if (rm->atomic.op_notify || rm->atomic.op_recverr) {
 853                 /* We allocate an uninitialized notifier here, because
 854                  * we don't want to do that in the completion handler. We
 855                  * would have to use GFP_ATOMIC there, and don't want to deal
 856                  * with failed allocations.
 857                  */
 858                 rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
 859                 if (!rm->atomic.op_notifier) {
 860                         ret = -ENOMEM;
 861                         goto err;
 862                 }
 863 
 864                 rm->atomic.op_notifier->n_user_token = args->user_token;
 865                 rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
 866         }
 867 
 868         rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
 869         rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
 870 
 871         return ret;
 872 err:
 873         if (page)
 874                 put_page(page);
 875         rm->atomic.op_active = 0;
 876         kfree(rm->atomic.op_notifier);
 877 
 878         return ret;
 879 }
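       /*
        * Illustrative sketch (not part of this file): rds_cmsg_atomic() is
        * reached through a control message of type RDS_CMSG_ATOMIC_CSWP,
        * RDS_CMSG_ATOMIC_FADD or one of the MASKED_ variants, carrying a
        * struct rds_atomic_args whose local_addr must be 8-byte aligned:
        *
        *      struct rds_atomic_args aargs = {
        *              .cookie      = remote_cookie,
        *              .local_addr  = (u64) &local_result,  // aligned u64
        *              .remote_addr = remote_addr,
        *              .cswp        = { .compare = old_val, .swap = new_val },
        *              .flags       = RDS_RDMA_NOTIFY_ME,
        *              .user_token  = token,
        *      };
        */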
