Lines matching refs: mr

These are cross-reference hits for the identifier mr in the mlx4 InfiniBand memory-region code. The leading number on each hit is the line number within the source file itself, and the trailing annotation names the enclosing function and notes whether mr is declared there as a local or taken as an argument.

59 struct mlx4_ib_mr *mr; in mlx4_ib_get_dma_mr() local
62 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_get_dma_mr()
63 if (!mr) in mlx4_ib_get_dma_mr()
67 ~0ull, convert_access(acc), 0, 0, &mr->mmr); in mlx4_ib_get_dma_mr()
71 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
75 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_get_dma_mr()
76 mr->umem = NULL; in mlx4_ib_get_dma_mr()
78 return &mr->ibmr; in mlx4_ib_get_dma_mr()
81 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
84 kfree(mr); in mlx4_ib_get_dma_mr()
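
The hits above trace the whole flow of mlx4_ib_get_dma_mr(): allocate the wrapper, reserve an mlx4 MR spanning the entire address space (length ~0ull, zero MTT entries), enable it in hardware, and mirror the hardware key into both lkey and rkey. A reconstruction sketch stitched from these hits; the if/goto unwinding, the label names, and the mlx4_mr_alloc() call site are inferred from context rather than shown in the matches:

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /* Cover the full 64-bit range (iova 0, length ~0ull): no MTT pages needed. */
        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;        /* no pinned user pages behind a DMA MR */

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}
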
138 struct mlx4_ib_mr *mr; in mlx4_ib_reg_user_mr() local
143 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_reg_user_mr()
144 if (!mr) in mlx4_ib_reg_user_mr()
149 mr->umem = ib_umem_get(pd->uobject->context, start, length, in mlx4_ib_reg_user_mr()
151 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
152 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
156 n = ib_umem_page_count(mr->umem); in mlx4_ib_reg_user_mr()
157 shift = ilog2(mr->umem->page_size); in mlx4_ib_reg_user_mr()
160 convert_access(access_flags), n, shift, &mr->mmr); in mlx4_ib_reg_user_mr()
164 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); in mlx4_ib_reg_user_mr()
168 err = mlx4_mr_enable(dev->dev, &mr->mmr); in mlx4_ib_reg_user_mr()
172 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_reg_user_mr()
174 return &mr->ibmr; in mlx4_ib_reg_user_mr()
177 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_reg_user_mr()
180 ib_umem_release(mr->umem); in mlx4_ib_reg_user_mr()
183 kfree(mr); in mlx4_ib_reg_user_mr()
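
mlx4_ib_reg_user_mr() is the user-memory registration path: pin the user range with ib_umem_get(), derive the page count and page shift from the umem, allocate an MR with that geometry, copy the pinned page addresses into the MTT via mlx4_ib_umem_write_mtt(), then enable. A sketch from the hits; the branches, unwind labels, and the mlx4_mr_alloc() line are filled in from context:

struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift, err, n;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /* Pin and DMA-map the user range. */
        mr->umem = ib_umem_get(pd->uobject->context, start, length,
                               access_flags, 0);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        n = ib_umem_page_count(mr->umem);
        shift = ilog2(mr->umem->page_size);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
        if (err)
                goto err_umem;

        /* Write the pinned page addresses into the MR's MTT. */
        err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
        if (err)
                goto err_mr;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_umem:
        ib_umem_release(mr->umem);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}
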
188 int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, in mlx4_ib_rereg_user_mr() argument
193 struct mlx4_ib_dev *dev = to_mdev(mr->device); in mlx4_ib_rereg_user_mr()
194 struct mlx4_ib_mr *mmr = to_mmr(mr); in mlx4_ib_rereg_user_mr()
230 mmr->umem = ib_umem_get(mr->uobject->context, start, length, in mlx4_ib_rereg_user_mr()
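
Only the prologue and the re-pin line of mlx4_ib_rereg_user_mr() match, so most of its flag-driven body is elided here. The ib_umem_get() hit sits in the translation-change branch, which drops the old mapping and pins the new range. A hedged fragment; the IB_MR_REREG_TRANS test, the cleanup call, the mr_access_flags name, and the error label are assumptions from context:

        if (flags & IB_MR_REREG_TRANS) {
                /* Drop the old translation, then pin the new user range. */
                mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                ib_umem_release(mmr->umem);
                mmr->umem = ib_umem_get(mr->uobject->context, start, length,
                                        mr_access_flags |
                                        IB_ACCESS_LOCAL_WRITE, 0);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                /* ... rebuild the MTT for the new umem ... */
        }
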
276 struct mlx4_ib_mr *mr, in mlx4_alloc_priv_pages() argument
285 mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx4_alloc_priv_pages()
286 if (!mr->pages_alloc) in mlx4_alloc_priv_pages()
289 mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN); in mlx4_alloc_priv_pages()
291 mr->page_map = dma_map_single(device->dma_device, mr->pages, in mlx4_alloc_priv_pages()
294 if (dma_mapping_error(device->dma_device, mr->page_map)) { in mlx4_alloc_priv_pages()
301 kfree(mr->pages_alloc); in mlx4_alloc_priv_pages()
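
mlx4_alloc_priv_pages() builds the fast-registration page list. It over-allocates by add_size so that PTR_ALIGN() can round the kzalloc'd buffer up to MLX4_MR_PAGES_ALIGN (kzalloc only guarantees ARCH_KMALLOC_MINALIGN), then DMA-maps the aligned array once for the MR's lifetime. A sketch from the hits; the add_size computation and the error path are inferred:

static int
mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
{
        int size = max_pages * sizeof(u64);
        int add_size;
        int ret;

        /* Pad so the aligned pointer still leaves room for 'size' bytes. */
        add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

        mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
        if (!mr->pages_alloc)
                return -ENOMEM;

        mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);

        mr->page_map = dma_map_single(device->dma_device, mr->pages,
                                      size, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dma_device, mr->page_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;
err:
        kfree(mr->pages_alloc);
        return ret;
}
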
307 mlx4_free_priv_pages(struct mlx4_ib_mr *mr) in mlx4_free_priv_pages() argument
309 if (mr->pages) { in mlx4_free_priv_pages()
310 struct ib_device *device = mr->ibmr.device; in mlx4_free_priv_pages()
311 int size = mr->max_pages * sizeof(u64); in mlx4_free_priv_pages()
313 dma_unmap_single(device->dma_device, mr->page_map, in mlx4_free_priv_pages()
315 kfree(mr->pages_alloc); in mlx4_free_priv_pages()
316 mr->pages = NULL; in mlx4_free_priv_pages()
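
The matching teardown unmaps the same size that was mapped and frees the original (unaligned) allocation, not the aligned pointer. A sketch, mirroring the hits almost line for line:

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;
                int size = mr->max_pages * sizeof(u64);

                dma_unmap_single(device->dma_device, mr->page_map,
                                 size, DMA_TO_DEVICE);
                /* Free the original allocation, not the PTR_ALIGN'd pointer. */
                kfree(mr->pages_alloc);
                mr->pages = NULL;
        }
}
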
322 struct mlx4_ib_mr *mr = to_mmr(ibmr); in mlx4_ib_dereg_mr() local
325 mlx4_free_priv_pages(mr); in mlx4_ib_dereg_mr()
327 ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); in mlx4_ib_dereg_mr()
330 if (mr->umem) in mlx4_ib_dereg_mr()
331 ib_umem_release(mr->umem); in mlx4_ib_dereg_mr()
332 kfree(mr); in mlx4_ib_dereg_mr()
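
mlx4_ib_dereg_mr() releases in reverse order of setup: the private page list first, then the hardware MR (bailing out if the firmware refuses, so the umem stays pinned rather than being freed under a live key), then the umem, then the wrapper. A sketch; the error check between the hits is an inferred step:

int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int ret;

        mlx4_free_priv_pages(mr);

        ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (ret)
                return ret;
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}
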
406 struct mlx4_ib_mr *mr; in mlx4_ib_alloc_mr() local
413 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_alloc_mr()
414 if (!mr) in mlx4_ib_alloc_mr()
418 max_num_sg, 0, &mr->mmr); in mlx4_ib_alloc_mr()
422 err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg); in mlx4_ib_alloc_mr()
426 mr->max_pages = max_num_sg; in mlx4_ib_alloc_mr()
428 err = mlx4_mr_enable(dev->dev, &mr->mmr); in mlx4_ib_alloc_mr()
432 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_alloc_mr()
433 mr->umem = NULL; in mlx4_ib_alloc_mr()
435 return &mr->ibmr; in mlx4_ib_alloc_mr()
438 mlx4_free_priv_pages(mr); in mlx4_ib_alloc_mr()
440 (void) mlx4_mr_free(dev->dev, &mr->mmr); in mlx4_ib_alloc_mr()
442 kfree(mr); in mlx4_ib_alloc_mr()
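
mlx4_ib_alloc_mr() combines the pieces for fast registration: a hardware MR sized for max_num_sg pages plus the private page list from mlx4_alloc_priv_pages(), with a three-level unwind. A sketch from the hits; the mr_type/max_num_sg validation, the mlx4_mr_alloc() line, and the label names are assumptions:

struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /* iova/size/access are 0: they are set per fast-reg work request. */
        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_num_sg, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
        if (err)
                goto err_free_mr;

        mr->max_pages = max_num_sg;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free_pl:
        mlx4_free_priv_pages(mr);
err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}
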
467 fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key; in mlx4_ib_fmr_alloc()
472 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr); in mlx4_ib_fmr_alloc()
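
Only two FMR hits appear because struct mlx4_ib_fmr embeds the mlx4_mr one level deeper (fmr->mfmr.mr rather than mr->mmr); the key-mirroring and error-path idiom is otherwise the same as in the MR paths above. A fragment restating the hits in context (the surrounding label placement is assumed):

        /* ... same idiom as the MR paths, one struct level deeper ... */
        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
        return &fmr->ibfmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
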
541 struct mlx4_ib_mr *mr = to_mmr(ibmr); in mlx4_set_page() local
543 if (unlikely(mr->npages == mr->max_pages)) in mlx4_set_page()
546 mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT); in mlx4_set_page()
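
mlx4_set_page() is the per-page callback handed to ib_sg_to_pages(): it appends one big-endian MTT entry, tagging the DMA address with MLX4_MTT_FLAG_PRESENT, and refuses to overflow the preallocated array. A sketch; the return values are inferred:

static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        /* MTT entries are big-endian, with the low bit marking "present". */
        mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

        return 0;
}
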
555 struct mlx4_ib_mr *mr = to_mmr(ibmr); in mlx4_ib_map_mr_sg() local
558 mr->npages = 0; in mlx4_ib_map_mr_sg()
560 ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, in mlx4_ib_map_mr_sg()
561 sizeof(u64) * mr->max_pages, in mlx4_ib_map_mr_sg()
566 ib_dma_sync_single_for_device(ibmr->device, mr->page_map, in mlx4_ib_map_mr_sg()
567 sizeof(u64) * mr->max_pages, in mlx4_ib_map_mr_sg()
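
mlx4_ib_map_mr_sg() brackets the page-list fill with DMA sync calls because mr->pages stays mapped DMA_TO_DEVICE for the MR's whole lifetime: sync for the CPU, let ib_sg_to_pages() call mlx4_set_page() once per page, then sync back for the device. A sketch; the exact signature (the sg_offset parameter in particular) varies across kernel versions and is an assumption here:

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int rc;

        mr->npages = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
                                   sizeof(u64) * mr->max_pages,
                                   DMA_TO_DEVICE);

        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
                                      sizeof(u64) * mr->max_pages,
                                      DMA_TO_DEVICE);

        return rc;
}
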