Lines matching refs: ibmr
80 static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
82 struct rds_iw_mr *ibmr,
84 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
89 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
385 struct rds_iw_mr *ibmr = NULL;  [local variable]
390 ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
391 list_del_init(&ibmr->mapping.m_list);
395 return ibmr;
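Lines 385-395 above are the pool's reuse path: rather than fast-registering a new MR for every mapping, an already-initialized one is pulled off the pool's clean list. A minimal sketch of that pattern follows; it, like all the sketches below, is a reconstruction around the matched lines against the pre-4.3 fastreg verbs API, not the verbatim source, and names such as pool->list_lock are assumptions:

    /* Sketch: reuse an already-registered MR from the clean list.
     * Assumes <rdma/ib_verbs.h>, <linux/list.h>, <linux/spinlock.h>. */
    static struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
    {
        struct rds_iw_mr *ibmr = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->list_lock, flags);
        if (!list_empty(&pool->clean_list)) {
            ibmr = list_entry(pool->clean_list.next,
                              struct rds_iw_mr, mapping.m_list);
            list_del_init(&ibmr->mapping.m_list);
        }
        spin_unlock_irqrestore(&pool->list_lock, flags);

        return ibmr;
    }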
401 struct rds_iw_mr *ibmr = NULL;  [local variable]
405 ibmr = rds_iw_reuse_fmr(pool);
406 if (ibmr)
407 return ibmr;
433 ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
434 if (!ibmr) {
439 spin_lock_init(&ibmr->mapping.m_lock);
440 INIT_LIST_HEAD(&ibmr->mapping.m_list);
441 ibmr->mapping.m_mr = ibmr;
443 err = rds_iw_init_fastreg(pool, ibmr);
448 return ibmr;
451 if (ibmr) {
452 rds_iw_destroy_fastreg(pool, ibmr);
453 kfree(ibmr);
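Lines 401-453 are rds_iw_alloc_mr: try the reuse path first, otherwise kzalloc a fresh rds_iw_mr, initialize its mapping, and run the fastreg setup, unwinding through the error path at 451-453 on failure. A condensed sketch; the pool-limit accounting elided between lines 408 and 432, and the label name, are assumptions:

    static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
    {
        struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
        struct rds_iw_mr *ibmr = NULL;
        int err = 0;

        ibmr = rds_iw_reuse_fmr(pool);
        if (ibmr)
            return ibmr;

        /* (pool item-count limits and flush-on-pressure elided) */

        ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
        if (!ibmr) {
            err = -ENOMEM;
            goto out_err;
        }

        spin_lock_init(&ibmr->mapping.m_lock);
        INIT_LIST_HEAD(&ibmr->mapping.m_list);
        ibmr->mapping.m_mr = ibmr;

        err = rds_iw_init_fastreg(pool, ibmr);
        if (err)
            goto out_err;

        return ibmr;

    out_err:
        if (ibmr) {
            rds_iw_destroy_fastreg(pool, ibmr);
            kfree(ibmr);
        }
        return ERR_PTR(err);
    }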
461 struct rds_iw_mr *ibmr = trans_private;  [local variable]
462 struct rds_iw_device *rds_iwdev = ibmr->device;
466 ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
467 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
470 ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
471 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
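Lines 461-471 are the sync hook: depending on the caller's direction, the mapped scatterlist is synced for CPU or for device access (note both arms sync DMA_BIDIRECTIONAL, matching how the pages were mapped). Sketch, assuming the dispatch is a switch on the standard dma_data_direction values:

    static void rds_iw_sync_mr(void *trans_private, int direction)
    {
        struct rds_iw_mr *ibmr = trans_private;
        struct rds_iw_device *rds_iwdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
            ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
                    ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
            break;
        case DMA_TO_DEVICE:
            ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
                    ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
            break;
        }
    }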
484 struct rds_iw_mr *ibmr, *next;  [local variables]
520 list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
522 list_del(&ibmr->mapping.m_list);
523 rds_iw_destroy_fastreg(pool, ibmr);
524 kfree(ibmr);
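Lines 484-524 sit inside the pool flush: dirty mappings are drained, and MRs above the pool's retention limit are spliced onto a local kill list whose members are then destroyed outside the lock. Sketch of the teardown loop; how kill_list gets populated is elided and assumed to happen under the pool lock, with pool in scope:

    struct rds_iw_mr *ibmr, *next;
    LIST_HEAD(kill_list);

    /* ... dirty MRs invalidated, excess MRs spliced onto kill_list
     * under pool->list_lock (elided) ... */

    list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
        list_del(&ibmr->mapping.m_list);
        rds_iw_destroy_fastreg(pool, ibmr);
        kfree(ibmr);
    }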
553 struct rds_iw_mr *ibmr = trans_private;  [local variable]
554 struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;
556 rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
561 rds_iw_free_fastreg(pool, ibmr);
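Lines 553-561 are the transport's free_mr callback: it hands the MR back to the pool via rds_iw_free_fastreg; actual invalidation and reuse happen there and in the flush worker. Sketch; the flush-on-pressure check, its threshold field, and the rds_wq workqueue target are assumptions:

    void rds_iw_free_mr(void *trans_private, int invalidate)
    {
        struct rds_iw_mr *ibmr = trans_private;
        struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

        rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);

        /* Return the MR to the pool's dirty list. */
        rds_iw_free_fastreg(pool, ibmr);

        /* If free MRs pin too much memory, kick the flush worker
         * (threshold and workqueue names assumed). */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned)
            queue_work(rds_wq, &pool->flush_worker);
    }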
595 struct rds_iw_mr *ibmr = NULL;  [local variable]
618 ibmr = rds_iw_alloc_mr(rds_iwdev);
619 if (IS_ERR(ibmr))
620 return ibmr;
622 ibmr->cm_id = cm_id;
623 ibmr->device = rds_iwdev;
625 ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
627 *key_ret = ibmr->mr->rkey;
633 if (ibmr)
634 rds_iw_free_mr(ibmr, 0);
635 ibmr = ERR_PTR(ret);
637 return ibmr;
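Lines 595-637 are rds_iw_get_mr, the path that hands an rkey back to the caller: allocate (or reuse) an MR, bind it to the connection's cm_id and device, and fast-register the scatterlist. Condensed sketch; the real entry point resolves rds_iwdev and cm_id from the socket (that lookup, lines 596-617, is elided and the signature below is an approximation):

    static void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
                               struct rds_iw_device *rds_iwdev,
                               struct rdma_cm_id *cm_id, u32 *key_ret)
    {
        struct rds_iw_mr *ibmr = NULL;
        int ret;

        ibmr = rds_iw_alloc_mr(rds_iwdev);
        if (IS_ERR(ibmr))
            return ibmr;

        ibmr->cm_id = cm_id;
        ibmr->device = rds_iwdev;

        ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
        if (ret == 0) {
            /* The freshly cycled rkey is what the peer uses for RDMA. */
            *key_ret = ibmr->mr->rkey;
        } else {
            rds_iw_free_mr(ibmr, 0);
            ibmr = ERR_PTR(ret);
        }
        return ibmr;
    }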
663 struct rds_iw_mr *ibmr)  [function argument]
690 ibmr->page_list = page_list;
691 ibmr->mr = mr;
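Lines 663-691 initialize a new MR for fast registration: one ib_alloc_fast_reg_mr against the device's PD plus one ib_alloc_fast_reg_page_list, both stashed on the ibmr. Sketch against the pre-4.3 verbs API; sizing both by pool->max_message_size is an assumption:

    static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
                                   struct rds_iw_mr *ibmr)
    {
        struct rds_iw_device *rds_iwdev = pool->device;
        struct ib_fast_reg_page_list *page_list;
        struct ib_mr *mr;

        mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
        if (IS_ERR(mr))
            return PTR_ERR(mr);

        page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev,
                                                pool->max_message_size);
        if (IS_ERR(page_list)) {
            ib_dereg_mr(mr);
            return PTR_ERR(page_list);
        }

        ibmr->page_list = page_list;
        ibmr->mr = mr;
        return 0;
    }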
697 struct rds_iw_mr *ibmr = mapping->m_mr;  [local variable]
707 ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
708 mapping->m_rkey = ibmr->mr->rkey;
715 f_wr.wr.fast_reg.page_list = ibmr->page_list;
725 ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
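Lines 697-725 build and post the fast-registration work request. ib_update_fast_reg_key cycles the low byte of the rkey using remap_count so each re-registration of the same MR yields a distinct key, then an IB_WR_FAST_REG_MR WR carrying the page list is posted on the connection's QP. Sketch of the WR setup, with mapping being the function's argument (per line 697); the page_shift, access flags, and signaled completion are assumptions consistent with RDMA read/write use:

    struct rds_iw_mr *ibmr = mapping->m_mr;
    struct ib_send_wr f_wr, *failed_wr;
    int ret;

    /* Cycle the key byte so stale rkeys from earlier registrations
     * of this MR cannot alias the new mapping. */
    ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
    mapping->m_rkey = ibmr->mr->rkey;

    memset(&f_wr, 0, sizeof(f_wr));
    f_wr.opcode = IB_WR_FAST_REG_MR;
    f_wr.wr.fast_reg.rkey = mapping->m_rkey;
    f_wr.wr.fast_reg.page_list = ibmr->page_list;
    f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
    f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
    f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
    f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
                                    IB_ACCESS_REMOTE_READ |
                                    IB_ACCESS_REMOTE_WRITE;
    f_wr.wr.fast_reg.iova_start = sg_dma_address(&mapping->m_sg.list[0]);
    f_wr.send_flags = IB_SEND_SIGNALED;

    failed_wr = &f_wr;
    ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);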
733 static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)  [function argument]
738 if (!ibmr->cm_id->qp || !ibmr->mr)
744 s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
748 ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
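Lines 733-748 post the matching IB_WR_LOCAL_INV to invalidate the MR's current rkey before the MR is reused or torn down; the QP and MR are checked first because the connection may already be gone. Sketch:

    static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
    {
        struct ib_send_wr s_wr, *failed_wr;
        int ret = 0;

        if (!ibmr->cm_id->qp || !ibmr->mr)
            goto out;

        memset(&s_wr, 0, sizeof(s_wr));
        s_wr.opcode = IB_WR_LOCAL_INV;
        s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
        s_wr.send_flags = IB_SEND_SIGNALED;

        failed_wr = &s_wr;
        ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
    out:
        return ret;
    }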
759 struct rds_iw_mr *ibmr,  [function argument]
764 struct rds_iw_mapping *mapping = &ibmr->mapping;
783 ibmr->page_list->page_list[i] = dma_pages[i];
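Lines 759-783 are the mapping step inside rds_iw_map_fastreg: the caller's scatterlist is DMA-mapped and the resulting per-page bus addresses are copied into the fast-reg page list, one u64 per device page, before the registration WR above is built. Sketch of the fill loop; the rds_iw_map_scatterlist helper name follows the surrounding code, but its exact signature is an assumption:

    struct rds_iw_mapping *mapping = &ibmr->mapping;
    struct rds_iw_device *rds_iwdev = pool->device;
    u64 *dma_pages;
    int i;

    /* DMA-map the sg list; returns one bus address per page
     * (helper signature assumed). */
    dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
    if (IS_ERR(dma_pages))
        return PTR_ERR(dma_pages);

    for (i = 0; i < mapping->m_sg.dma_npages; ++i)
        ibmr->page_list->page_list[i] = dma_pages[i];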
801 struct rds_iw_mr *ibmr)  [function argument]
806 if (!ibmr->mapping.m_sg.dma_len)
809 ret = rds_iw_rdma_fastreg_inv(ibmr);
816 list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
817 atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
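Lines 801-817 return a mapped MR to the pool: a still-mapped MR is first invalidated, then its mapping is queued on the dirty list and its pinned page count is added to free_pinned so the flush worker knows how much memory free MRs still pin. Sketch; the list_lock name and the dirty_count accounting are assumptions:

    static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
                                    struct rds_iw_mr *ibmr)
    {
        unsigned long flags;

        if (!ibmr->mapping.m_sg.dma_len)
            return;

        /* Post a LOCAL_INV so the rkey dies before the MR is recycled. */
        if (rds_iw_rdma_fastreg_inv(ibmr))
            return;

        spin_lock_irqsave(&pool->list_lock, flags);
        list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
        atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);
        spin_unlock_irqrestore(&pool->list_lock, flags);
    }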
869 struct rds_iw_mr *ibmr)  [function argument]
871 if (ibmr->page_list)
872 ib_free_fast_reg_page_list(ibmr->page_list);
873 if (ibmr->mr)
874 ib_dereg_mr(ibmr->mr);
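Lines 869-874 are the final teardown, reached both from the flush's kill list (line 523) and from the allocation error unwind (line 452), which is why both members are NULL-checked: a partially initialized ibmr may have neither a page list nor an MR yet. Sketch:

    static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
                                       struct rds_iw_mr *ibmr)
    {
        if (ibmr->page_list)
            ib_free_fast_reg_page_list(ibmr->page_list);
        if (ibmr->mr)
            ib_dereg_mr(ibmr->mr);
    }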