/linux-4.1.27/drivers/infiniband/core/ |
D | fmr_pool.c |
    120  struct ib_pool_fmr *fmr;  in ib_fmr_cache_lookup() local
    127  hlist_for_each_entry(fmr, bucket, cache_node)  in ib_fmr_cache_lookup()
    128  if (io_virtual_address == fmr->io_virtual_address &&  in ib_fmr_cache_lookup()
    129  page_list_len == fmr->page_list_len &&  in ib_fmr_cache_lookup()
    130  !memcmp(page_list, fmr->page_list,  in ib_fmr_cache_lookup()
    132  return fmr;  in ib_fmr_cache_lookup()
    140  struct ib_pool_fmr *fmr;  in ib_fmr_batch_release() local
    146  list_for_each_entry(fmr, &pool->dirty_list, list) {  in ib_fmr_batch_release()
    147  hlist_del_init(&fmr->cache_node);  in ib_fmr_batch_release()
    148  fmr->remap_count = 0;  in ib_fmr_batch_release()
    [all …]
|
D | verbs.c |
    1300  struct ib_fmr *fmr;  in ib_alloc_fmr() local
    1305  fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);  in ib_alloc_fmr()
    1306  if (!IS_ERR(fmr)) {  in ib_alloc_fmr()
    1307  fmr->device = pd->device;  in ib_alloc_fmr()
    1308  fmr->pd = pd;  in ib_alloc_fmr()
    1312  return fmr;  in ib_alloc_fmr()
    1318  struct ib_fmr *fmr;  in ib_unmap_fmr() local
    1323  fmr = list_entry(fmr_list->next, struct ib_fmr, list);  in ib_unmap_fmr()
    1324  return fmr->device->unmap_fmr(fmr_list);  in ib_unmap_fmr()
    1328  int ib_dealloc_fmr(struct ib_fmr *fmr)  in ib_dealloc_fmr() argument
    [all …]
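
The verbs.c hits above are the core entry points of the legacy FMR API: ib_alloc_fmr() stores the device and PD on the new handle, ib_unmap_fmr() hands a whole list of FMRs to the driver, and ib_dealloc_fmr() releases the handle. Below is a minimal, hedged sketch of how a kernel ULP might drive that lifecycle; the function name, access flags and attribute values are illustrative, only the four ib_*_fmr() calls and struct ib_fmr_attr come from the tree.

#include <rdma/ib_verbs.h>

/* Hedged sketch of the verbs-level FMR lifecycle; not from the tree. */
static int example_fmr_cycle(struct ib_pd *pd, u64 *pages, int npages, u64 iova)
{
	struct ib_fmr_attr attr = {
		.max_pages  = npages,		/* most pages one mapping may cover */
		.max_maps   = 32,		/* remaps allowed before an unmap is required */
		.page_shift = PAGE_SHIFT,
	};
	struct ib_fmr *fmr;
	LIST_HEAD(unmap_list);
	int ret;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE, &attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	/* fast path: remap without a synchronous command to the HCA */
	ret = ib_map_phys_fmr(fmr, pages, npages, iova);
	if (ret)
		goto out;

	/* ... post work requests that use fmr->lkey / fmr->rkey ... */

	list_add_tail(&fmr->list, &unmap_list);
	ib_unmap_fmr(&unmap_list);	/* batched invalidation, potentially slow */
out:
	ib_dealloc_fmr(fmr);
	return ret;
}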
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
D | ipath_mr.c |
    285  struct ipath_fmr *fmr;  in ipath_alloc_fmr() local
    291  fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);  in ipath_alloc_fmr()
    292  if (!fmr)  in ipath_alloc_fmr()
    297  fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],  in ipath_alloc_fmr()
    299  if (!fmr->mr.map[i])  in ipath_alloc_fmr()
    302  fmr->mr.mapsz = m;  in ipath_alloc_fmr()
    308  if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))  in ipath_alloc_fmr()
    310  fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;  in ipath_alloc_fmr()
    315  fmr->mr.pd = pd;  in ipath_alloc_fmr()
    316  fmr->mr.user_base = 0;  in ipath_alloc_fmr()
    [all …]
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_mr.c |
    382  struct qib_fmr *fmr;  in qib_alloc_fmr() local
    389  fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);  in qib_alloc_fmr()
    390  if (!fmr)  in qib_alloc_fmr()
    393  rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);  in qib_alloc_fmr()
    401  rval = qib_alloc_lkey(&fmr->mr, 0);  in qib_alloc_fmr()
    404  fmr->ibfmr.rkey = fmr->mr.lkey;  in qib_alloc_fmr()
    405  fmr->ibfmr.lkey = fmr->mr.lkey;  in qib_alloc_fmr()
    410  fmr->mr.access_flags = mr_access_flags;  in qib_alloc_fmr()
    411  fmr->mr.max_segs = fmr_attr->max_pages;  in qib_alloc_fmr()
    412  fmr->mr.page_shift = fmr_attr->page_shift;  in qib_alloc_fmr()
    [all …]
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_mr.c |
    678  int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)  in mthca_free_fmr() argument
    680  if (fmr->maps)  in mthca_free_fmr()
    683  mthca_free_region(dev, fmr->ibmr.lkey);  in mthca_free_fmr()
    684  mthca_free_mtt(dev, fmr->mtt);  in mthca_free_fmr()
    689  static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,  in mthca_check_fmr() argument
    694  if (list_len > fmr->attr.max_pages)  in mthca_check_fmr()
    697  page_mask = (1 << fmr->attr.page_shift) - 1;  in mthca_check_fmr()
    710  if (fmr->maps >= fmr->attr.max_maps)  in mthca_check_fmr()
    720  struct mthca_fmr *fmr = to_mfmr(ibfmr);  in mthca_tavor_map_phys_fmr() local
    726  err = mthca_check_fmr(fmr, page_list, list_len, iova);  in mthca_tavor_map_phys_fmr()
    [all …]
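
mthca_check_fmr() above (and mlx4_check_fmr() further down) gate every remap with the same sanity checks: list length against max_pages, page alignment derived from page_shift, and the remap budget max_maps. Here is a hedged paraphrase of that check, with the limits passed in explicitly instead of read from a driver struct; it is a sketch of the pattern, not the exact body.

#include <linux/types.h>
#include <linux/errno.h>

/* Hedged paraphrase of the mthca/mlx4 pre-map validation pattern. */
static int example_check_fmr(int list_len, int max_pages, int maps, int max_maps,
			     u8 page_shift, u64 *page_list, u64 iova)
{
	u64 page_mask = (1ULL << page_shift) - 1;
	int i;

	if (list_len > max_pages)	/* more pages than the FMR was sized for */
		return -EINVAL;
	if (iova & page_mask)		/* I/O virtual address must be page aligned */
		return -EINVAL;
	for (i = 0; i < list_len; ++i)	/* and so must every physical page address */
		if (page_list[i] & page_mask)
			return -EINVAL;
	if (maps >= max_maps)		/* remap budget exhausted: unmap/flush first */
		return -EINVAL;
	return 0;
}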
|
D | mthca_provider.c |
    1091  struct mthca_fmr *fmr;  in mthca_alloc_fmr() local
    1094  fmr = kmalloc(sizeof *fmr, GFP_KERNEL);  in mthca_alloc_fmr()
    1095  if (!fmr)  in mthca_alloc_fmr()
    1098  memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);  in mthca_alloc_fmr()
    1100  convert_access(mr_access_flags), fmr);  in mthca_alloc_fmr()
    1103  kfree(fmr);  in mthca_alloc_fmr()
    1107  return &fmr->ibmr;  in mthca_alloc_fmr()
    1110  static int mthca_dealloc_fmr(struct ib_fmr *fmr)  in mthca_dealloc_fmr() argument
    1112  struct mthca_fmr *mfmr = to_mfmr(fmr);  in mthca_dealloc_fmr()
    1115  err = mthca_free_fmr(to_mdev(fmr->device), mfmr);  in mthca_dealloc_fmr()
    [all …]
|
D | mthca_dev.h |
    482  u32 access, struct mthca_fmr *fmr);
    485  void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
    488  void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
    489  int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr);
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
D | mr.c |
    972  static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,  in mlx4_check_fmr() argument
    977  if (npages > fmr->max_pages)  in mlx4_check_fmr()
    980  page_mask = (1 << fmr->page_shift) - 1;  in mlx4_check_fmr()
    993  if (fmr->maps >= fmr->max_maps)  in mlx4_check_fmr()
    999  int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,  in mlx4_map_phys_fmr() argument
    1005  err = mlx4_check_fmr(fmr, page_list, npages, iova);  in mlx4_map_phys_fmr()
    1009  ++fmr->maps;  in mlx4_map_phys_fmr()
    1011  key = key_to_hw_index(fmr->mr.key);  in mlx4_map_phys_fmr()
    1013  *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);  in mlx4_map_phys_fmr()
    1015  *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;  in mlx4_map_phys_fmr()
    [all …]
|
/linux-4.1.27/net/sunrpc/xprtrdma/ |
D | fmr_ops.c |
    64  r->r.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);  in fmr_op_init()
    65  if (IS_ERR(r->r.fmr))  in fmr_op_init()
    74  rc = PTR_ERR(r->r.fmr);  in fmr_op_init()
    113  rc = ib_map_phys_fmr(mw->r.fmr, physaddrs, i, seg1->mr_dma);  in fmr_op_map()
    117  seg1->mr_rkey = mw->r.fmr->rkey;  in fmr_op_map()
    144  list_add(&seg1->rl_mw->r.fmr->list, &l);  in fmr_op_unmap()
    174  list_add(&r->r.fmr->list, &list);  in fmr_op_reset()
    191  rc = ib_dealloc_fmr(r->r.fmr);  in fmr_op_destroy()
|
D | xprt_rdma.h | 210 struct ib_fmr *fmr; member
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
D | mr.c |
    438  struct mlx4_ib_fmr *fmr;  in mlx4_ib_fmr_alloc() local
    441  fmr = kmalloc(sizeof *fmr, GFP_KERNEL);  in mlx4_ib_fmr_alloc()
    442  if (!fmr)  in mlx4_ib_fmr_alloc()
    447  fmr_attr->page_shift, &fmr->mfmr);  in mlx4_ib_fmr_alloc()
    451  err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);  in mlx4_ib_fmr_alloc()
    455  fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;  in mlx4_ib_fmr_alloc()
    457  return &fmr->ibfmr;  in mlx4_ib_fmr_alloc()
    460  (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);  in mlx4_ib_fmr_alloc()
    463  kfree(fmr);  in mlx4_ib_fmr_alloc()
|
D | mlx4_ib.h | 722 int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
|
/linux-4.1.27/drivers/infiniband/ulp/iser/ |
D | iser_memory.c |
    449  struct ib_pool_fmr *fmr;  in iser_reg_page_vec() local
    464  fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,  in iser_reg_page_vec()
    468  if (IS_ERR(fmr)) {  in iser_reg_page_vec()
    469  ret = PTR_ERR(fmr);  in iser_reg_page_vec()
    474  mem_reg->sge.lkey = fmr->fmr->lkey;  in iser_reg_page_vec()
    475  mem_reg->rkey = fmr->fmr->rkey;  in iser_reg_page_vec()
    478  mem_reg->mem_h = fmr;  in iser_reg_page_vec()
    552  err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,  in iser_reg_rdma_mem_fmr()
    560  ib_conn->fmr.page_vec->data_size,  in iser_reg_rdma_mem_fmr()
    561  ib_conn->fmr.page_vec->length,  in iser_reg_rdma_mem_fmr()
    [all …]
|
D | iser_verbs.c |
    218  ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +  in iser_create_fmr_pool()
    221  if (!ib_conn->fmr.page_vec)  in iser_create_fmr_pool()
    224  ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);  in iser_create_fmr_pool()
    240  ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);  in iser_create_fmr_pool()
    241  if (!IS_ERR(ib_conn->fmr.pool))  in iser_create_fmr_pool()
    245  kfree(ib_conn->fmr.page_vec);  in iser_create_fmr_pool()
    246  ib_conn->fmr.page_vec = NULL;  in iser_create_fmr_pool()
    248  ret = PTR_ERR(ib_conn->fmr.pool);  in iser_create_fmr_pool()
    249  ib_conn->fmr.pool = NULL;  in iser_create_fmr_pool()
    265  ib_conn, ib_conn->fmr.pool);  in iser_free_fmr_pool()
    [all …]
|
D | iscsi_iser.h | 446 } fmr; member
|
/linux-4.1.27/net/rds/ |
D | ib_rdma.c |
    50  struct ib_fmr *fmr;  member
    341  ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,  in rds_ib_alloc_fmr()
    347  if (IS_ERR(ibmr->fmr)) {  in rds_ib_alloc_fmr()
    348  err = PTR_ERR(ibmr->fmr);  in rds_ib_alloc_fmr()
    349  ibmr->fmr = NULL;  in rds_ib_alloc_fmr()
    359  if (ibmr->fmr)  in rds_ib_alloc_fmr()
    360  ib_dealloc_fmr(ibmr->fmr);  in rds_ib_alloc_fmr()
    428  ret = ib_map_phys_fmr(ibmr->fmr,  in rds_ib_map_fmr()
    633  list_add(&ibmr->fmr->list, &fmr_list);  in rds_ib_flush_mr_pool()
    646  ib_dealloc_fmr(ibmr->fmr);  in rds_ib_flush_mr_pool()
    [all …]
|
/linux-4.1.27/include/rdma/ |
D | ib_fmr_pool.h |
    68  struct ib_fmr *fmr;  member
    91  int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
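
ib_fmr_pool.h is the consumer-facing pool API used by the iser, SRP and Lustre o2iblnd entries in this list: a pool of pre-allocated FMRs, mapped per I/O with ib_fmr_pool_map_phys() and returned with ib_fmr_pool_unmap(), with dirty FMRs flushed in batches. Here is a hedged sketch of typical pool usage; the numeric parameters are illustrative, while the calls and the struct ib_fmr_pool_param fields come from this header.

#include <rdma/ib_fmr_pool.h>

/* Hedged sketch of an FMR pool consumer, modeled on iser/SRP; not from the tree. */
static struct ib_fmr_pool *example_create_pool(struct ib_pd *pd)
{
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 64,
		.page_shift        = PAGE_SHIFT,
		.access            = IB_ACCESS_LOCAL_WRITE |
				     IB_ACCESS_REMOTE_WRITE |
				     IB_ACCESS_REMOTE_READ,
		.pool_size         = 256,	/* FMRs kept ready in the pool */
		.dirty_watermark   = 32,	/* batch-unmap once this many are dirty */
		.cache             = 1,		/* reuse identical mappings when possible */
	};

	return ib_create_fmr_pool(pd, &params);
}

static int example_pool_map(struct ib_fmr_pool *pool, u64 *pages, int npages,
			    u64 iova, u32 *rkey, struct ib_pool_fmr **handle)
{
	struct ib_pool_fmr *pfmr;

	pfmr = ib_fmr_pool_map_phys(pool, pages, npages, iova);
	if (IS_ERR(pfmr))
		return PTR_ERR(pfmr);

	*rkey = pfmr->fmr->rkey;	/* the 'fmr' member flagged at line 68 above */
	*handle = pfmr;
	return 0;
}

/* later: ib_fmr_pool_unmap(*handle); and ib_destroy_fmr_pool(pool) at teardown */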
|
D | ib_verbs.h |
    1626  int (*map_phys_fmr)(struct ib_fmr *fmr,
    1630  int (*dealloc_fmr)(struct ib_fmr *fmr);
    2586  static inline int ib_map_phys_fmr(struct ib_fmr *fmr,  in ib_map_phys_fmr() argument
    2590  return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);  in ib_map_phys_fmr()
    2603  int ib_dealloc_fmr(struct ib_fmr *fmr);
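
In ib_verbs.h the inline ib_map_phys_fmr() simply dispatches to the provider's map_phys_fmr hook, so in this kernel each HCA driver wires its own implementations directly into struct ib_device before registering it. A hedged sketch of that wiring follows; the mydrv_* names are hypothetical stubs, only the struct ib_device members and hook signatures come from this header.

#include <rdma/ib_verbs.h>

/* Hypothetical provider stubs; a real driver touches HW MPT/MTT state here. */
static struct ib_fmr *mydrv_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{ return ERR_PTR(-ENOSYS); }
static int mydrv_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list,
			      int list_len, u64 iova)
{ return -ENOSYS; }		/* write the page list, point the region at iova */
static int mydrv_unmap_fmr(struct list_head *fmr_list)
{ return -ENOSYS; }		/* invalidate every FMR on the list */
static int mydrv_dealloc_fmr(struct ib_fmr *fmr)
{ return -ENOSYS; }

static void mydrv_set_fmr_ops(struct ib_device *ibdev)
{
	ibdev->alloc_fmr    = mydrv_alloc_fmr;
	ibdev->map_phys_fmr = mydrv_map_phys_fmr;	/* called by ib_map_phys_fmr() above */
	ibdev->unmap_fmr    = mydrv_unmap_fmr;
	ibdev->dealloc_fmr  = mydrv_dealloc_fmr;
}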
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
D | ehca_mrmw.c |
    855  int ehca_map_phys_fmr(struct ib_fmr *fmr,  in ehca_map_phys_fmr() argument
    862  container_of(fmr->device, struct ehca_shca, ib_device);  in ehca_map_phys_fmr()
    863  struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);  in ehca_map_phys_fmr()
    864  struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);  in ehca_map_phys_fmr()
    869  ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",  in ehca_map_phys_fmr()
    879  ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",  in ehca_map_phys_fmr()
    886  ehca_info(fmr->device, "map limit exceeded, fmr=%p "  in ehca_map_phys_fmr()
    888  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);  in ehca_map_phys_fmr()
    897  pginfo.u.fmr.page_list = page_list;  in ehca_map_phys_fmr()
    900  pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;  in ehca_map_phys_fmr()
    [all …]
|
D | ehca_iverbs.h |
    109  int ehca_map_phys_fmr(struct ib_fmr *fmr,
    114  int ehca_dealloc_fmr(struct ib_fmr *fmr);
|
D | ehca_classes.h | 332 } fmr; member
|
/linux-4.1.27/drivers/mtd/nand/ |
D | fsl_elbc_nand.c |
    59  unsigned int fmr; /* FCM Flash Mode Register value */  member
    205  out_be32(&lbc->fmr, priv->fmr | 3);  in fsl_elbc_run_command()
    211  in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));  in fsl_elbc_run_command()
    635  priv->fmr |= al << FMR_AL_SHIFT;  in fsl_elbc_chip_init_tail()
    683  chip->ecc.layout = (priv->fmr & FMR_ECCM) ?  in fsl_elbc_chip_init_tail()
    753  priv->fmr = 15 << FMR_CWTO_SHIFT;  in fsl_elbc_chip_init()
    755  priv->fmr |= FMR_ECCM;  in fsl_elbc_chip_init()
    784  chip->ecc.layout = (priv->fmr & FMR_ECCM) ?  in fsl_elbc_chip_init()
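
Unrelated to InfiniBand, fsl_elbc_nand.c caches an FCM Flash Mode Register (FMR) value in priv->fmr and ORs per-command bits into it before writing the controller (line 205). A hedged sketch of how that cached value is composed from the bits visible above; the FMR_* macros are assumed to come from asm/fsl_lbc.h, and the helper itself is illustrative rather than a function in the driver.

#include <asm/fsl_lbc.h>	/* assumed home of the FMR_* bit definitions */

/* Hedged sketch of the cached Flash Mode Register value built during init. */
static unsigned int example_build_fmr(int hw_ecc, unsigned int al)
{
	unsigned int fmr = 15 << FMR_CWTO_SHIFT;	/* maximum command wait timeout */

	if (hw_ecc)
		fmr |= FMR_ECCM;			/* hardware ECC mode, also selects ecc.layout */
	fmr |= al << FMR_AL_SHIFT;			/* extra address latch cycles for large-page chips */
	return fmr;	/* per-command bits are OR'd in just before out_be32(&lbc->fmr, ...) */
}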
|
/linux-4.1.27/arch/powerpc/math-emu/ |
D | fmr.c | 6 fmr(u32 *frD, u32 *frB) in fmr() function
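
Here fmr is the PowerPC floating-point move-register instruction handled by the math emulator; the operation amounts to copying one 64-bit FPR, held as two 32-bit words, into another. A hedged sketch consistent with the prototype above (the real file may add debug output):

/* Hedged sketch of the fmr frD,frB soft-float emulation: a plain register copy. */
int fmr(u32 *frD, u32 *frB)
{
	frD[0] = frB[0];
	frD[1] = frB[1];
	return 0;
}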
|
D | Makefile | 12 fmr.o lfd.o stfd.o
|
D | math.c |
    69  FLOATFUNC(fmr);
    314  case FMR: func = fmr; type = XB; break;  in do_mathemu()
|
/linux-4.1.27/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_hw.h | 103 int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);
|
/linux-4.1.27/include/linux/mlx4/ |
D | device.h |
    1325  int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
    1328  int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
    1329  int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
    1330  void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
    1332  int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
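
These prototypes are the mlx4 core (non-IB) half of FMR support that the mlx4_ib_fmr_alloc() hits above call into. A hedged sketch of the full core lifecycle follows; the pdn, access flags and numeric limits are illustrative, and the alloc/map signatures are assumed from the truncated prototypes above plus their callers.

#include <linux/mlx4/device.h>

/* Hedged sketch of the mlx4 core FMR lifecycle as the mlx4_ib provider drives it. */
static int example_mlx4_fmr(struct mlx4_dev *dev, u32 pdn,
			    u64 *pages, int npages, u64 iova)
{
	struct mlx4_fmr fmr;
	u32 lkey, rkey;
	int err;

	err = mlx4_fmr_alloc(dev, pdn,
			     MLX4_PERM_LOCAL_WRITE | MLX4_PERM_REMOTE_WRITE,
			     npages /* max_pages */, 32 /* max_maps */,
			     PAGE_SHIFT, &fmr);
	if (err)
		return err;

	err = mlx4_fmr_enable(dev, &fmr);	/* make the region usable by hardware */
	if (err)
		goto out_free;

	err = mlx4_map_phys_fmr(dev, &fmr, pages, npages, iova, &lkey, &rkey);
	if (err)
		goto out_free;

	/* ... lkey/rkey can now be used in work requests ... */

	mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
out_free:
	mlx4_fmr_free(dev, &fmr);
	return err;
}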
|
/linux-4.1.27/arch/powerpc/lib/ |
D | ldstfp.S |
    49  fmr fr0,reg
    68  fmr reg,fr0
|
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
D | o2iblnd.h |
    553  kib_fmr_t fmr; /* FMR */  member
    961  int npages, __u64 iov, kib_fmr_t *fmr);
    962  void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
|
D | o2iblnd.c |
    1485  void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)  in kiblnd_fmr_pool_unmap() argument
    1488  kib_fmr_pool_t *fpo = fmr->fmr_pool;  in kiblnd_fmr_pool_unmap()
    1494  rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);  in kiblnd_fmr_pool_unmap()
    1502  fmr->fmr_pool = NULL;  in kiblnd_fmr_pool_unmap()
    1503  fmr->fmr_pfmr = NULL;  in kiblnd_fmr_pool_unmap()
    1525  __u64 iov, kib_fmr_t *fmr)  in kiblnd_fmr_pool_map()
    1543  fmr->fmr_pool = fpo;  in kiblnd_fmr_pool_map()
    1544  fmr->fmr_pfmr = pfmr;  in kiblnd_fmr_pool_map()
|
D | o2iblnd_cb.c |
    578  rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr);  in kiblnd_fmr_map_tx()
    586  rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :  in kiblnd_fmr_map_tx()
    587  tx->tx_u.fmr.fmr_pfmr->fmr->lkey;  in kiblnd_fmr_map_tx()
    638  if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) {  in kiblnd_unmap_tx()
    639  kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);  in kiblnd_unmap_tx()
    640  tx->tx_u.fmr.fmr_pfmr = NULL;  in kiblnd_unmap_tx()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | fsl_lbc.h | 178 __be32 fmr; /**< Flash Mode Register */ member
|
/linux-4.1.27/drivers/infiniband/ulp/srp/ |
D | ib_srp.c |
    1276  struct ib_pool_fmr *fmr;  in srp_map_finish_fmr() local
    1279  fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,  in srp_map_finish_fmr()
    1281  if (IS_ERR(fmr))  in srp_map_finish_fmr()
    1282  return PTR_ERR(fmr);  in srp_map_finish_fmr()
    1284  *state->next_fmr++ = fmr;  in srp_map_finish_fmr()
    1287  srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);  in srp_map_finish_fmr()
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | exceptions-64s.S | 558 #define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
|