Lines matching refs: mr
42 struct qib_mregion mr; /* must be last */ member
50 static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, in init_qib_mregion() argument
58 mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); in init_qib_mregion()
59 if (!mr->map[i]) in init_qib_mregion()
62 mr->mapsz = m; in init_qib_mregion()
63 init_completion(&mr->comp); in init_qib_mregion()
65 atomic_set(&mr->refcount, 1); in init_qib_mregion()
66 mr->pd = pd; in init_qib_mregion()
67 mr->max_segs = count; in init_qib_mregion()
72 kfree(mr->map[--i]); in init_qib_mregion()
77 static void deinit_qib_mregion(struct qib_mregion *mr) in deinit_qib_mregion() argument
79 int i = mr->mapsz; in deinit_qib_mregion()
81 mr->mapsz = 0; in deinit_qib_mregion()
83 kfree(mr->map[--i]); in deinit_qib_mregion()
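
The matches above (lines 42-83) come from the qib InfiniBand driver's memory-region code: struct qib_mregion is embedded at the end of the containing MR structure, and init_qib_mregion()/deinit_qib_mregion() set up and tear down the per-region state. A minimal sketch of how the quoted lines fit together; the QIB_SEGSZ chunk arithmetic, loop bounds, and error label are reconstructed from context rather than quoted:

static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
			    int count)
{
	int m, i = 0;

	/* one first-level map[] chunk per QIB_SEGSZ segments (assumed) */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
		if (!mr->map[i])
			goto bail;
	}
	mr->mapsz = m;
	init_completion(&mr->comp);
	atomic_set(&mr->refcount, 1);	/* reference held by the creator */
	mr->pd = pd;
	mr->max_segs = count;
	return 0;

bail:
	while (i)
		kfree(mr->map[--i]);
	return -ENOMEM;
}

static void deinit_qib_mregion(struct qib_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}
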
98 struct qib_mr *mr = NULL; in qib_get_dma_mr() local
107 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in qib_get_dma_mr()
108 if (!mr) { in qib_get_dma_mr()
113 rval = init_qib_mregion(&mr->mr, pd, 0); in qib_get_dma_mr()
120 rval = qib_alloc_lkey(&mr->mr, 1); in qib_get_dma_mr()
126 mr->mr.access_flags = acc; in qib_get_dma_mr()
127 ret = &mr->ibmr; in qib_get_dma_mr()
132 deinit_qib_mregion(&mr->mr); in qib_get_dma_mr()
134 kfree(mr); in qib_get_dma_mr()
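
qib_get_dma_mr() (lines 98-134) builds the special all-of-memory DMA region: no segment table (count 0 to init_qib_mregion()) and qib_alloc_lkey() called with its second argument set to 1, which presumably selects the reserved DMA LKEY rather than a slot in the lkey table. A sketch with the ERR_PTR plumbing, unwind labels, and any permission checks outside the matches assumed or trimmed:

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_mr *mr = NULL;
	struct ib_mr *ret;
	int rval;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = init_qib_mregion(&mr->mr, pd, 0);	/* no segments needed */
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = qib_alloc_lkey(&mr->mr, 1);		/* reserved DMA lkey */
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
	return ret;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	return ret;
}
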
140 struct qib_mr *mr; in alloc_mr() local
146 mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); in alloc_mr()
147 if (!mr) in alloc_mr()
150 rval = init_qib_mregion(&mr->mr, pd, count); in alloc_mr()
157 rval = qib_alloc_lkey(&mr->mr, 0); in alloc_mr()
160 mr->ibmr.lkey = mr->mr.lkey; in alloc_mr()
161 mr->ibmr.rkey = mr->mr.lkey; in alloc_mr()
163 return mr; in alloc_mr()
166 deinit_qib_mregion(&mr->mr); in alloc_mr()
168 kfree(mr); in alloc_mr()
169 mr = ERR_PTR(rval); in alloc_mr()
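
alloc_mr() (lines 140-169) is the common constructor behind the phys, user, and REG_MR registration paths: the allocation is sized to hold one first-level map pointer per QIB_SEGSZ segments, then init_qib_mregion() and qib_alloc_lkey() are stacked with a matching unwind. A sketch; the sizing math and label names are assumed:

static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
{
	struct qib_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* struct plus one first-level map pointer per QIB_SEGSZ segments */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = init_qib_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;

	rval = qib_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
	return mr;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	return mr;
}
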
186 struct qib_mr *mr; in qib_reg_phys_mr() local
190 mr = alloc_mr(num_phys_buf, pd); in qib_reg_phys_mr()
191 if (IS_ERR(mr)) { in qib_reg_phys_mr()
192 ret = (struct ib_mr *)mr; in qib_reg_phys_mr()
196 mr->mr.user_base = *iova_start; in qib_reg_phys_mr()
197 mr->mr.iova = *iova_start; in qib_reg_phys_mr()
198 mr->mr.access_flags = acc; in qib_reg_phys_mr()
203 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; in qib_reg_phys_mr()
204 mr->mr.map[m]->segs[n].length = buffer_list[i].size; in qib_reg_phys_mr()
205 mr->mr.length += buffer_list[i].size; in qib_reg_phys_mr()
213 ret = &mr->ibmr; in qib_reg_phys_mr()
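
In qib_reg_phys_mr() (lines 186-213) the caller's buffer list is copied straight into the two-level map[m]->segs[n] table while mr.length accumulates the total size. A sketch of the whole function; the prototype, labels, and the wrap of n at QIB_SEGSZ (with the m increment) are not in the matches and are assumed from context:

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	struct ib_mr *ret;
	int m = 0, n = 0, i;

	mr = alloc_mr(num_phys_buf, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail;
	}

	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.access_flags = acc;

	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		if (++n == QIB_SEGSZ) {		/* advance to next map chunk */
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;
bail:
	return ret;
}
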
233 struct qib_mr *mr; in qib_reg_user_mr() local
251 mr = alloc_mr(n, pd); in qib_reg_user_mr()
252 if (IS_ERR(mr)) { in qib_reg_user_mr()
253 ret = (struct ib_mr *)mr; in qib_reg_user_mr()
258 mr->mr.user_base = start; in qib_reg_user_mr()
259 mr->mr.iova = virt_addr; in qib_reg_user_mr()
260 mr->mr.length = length; in qib_reg_user_mr()
261 mr->mr.offset = ib_umem_offset(umem); in qib_reg_user_mr()
262 mr->mr.access_flags = mr_access_flags; in qib_reg_user_mr()
263 mr->umem = umem; in qib_reg_user_mr()
266 mr->mr.page_shift = ilog2(umem->page_size); in qib_reg_user_mr()
277 mr->mr.map[m]->segs[n].vaddr = vaddr; in qib_reg_user_mr()
278 mr->mr.map[m]->segs[n].length = umem->page_size; in qib_reg_user_mr()
285 ret = &mr->ibmr; in qib_reg_user_mr()
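
qib_reg_user_mr() (lines 233-285) pins the user range, records the request parameters in mr->mr, and then fills one segment per umem page. A sketch assuming the era's ib_umem_get() call and the for_each_sg() walk over umem->sg_head.sgl that drivers of this vintage use; some error checks outside the matches are trimmed:

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n = 0, m = 0, entry;

	if (length == 0)
		return ERR_PTR(-EINVAL);

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	mr = alloc_mr(umem->nmap, pd);	/* one segment per mapped sg entry */
	if (IS_ERR(mr)) {
		ib_umem_release(umem);
		return (struct ib_mr *)mr;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;
	mr->mr.page_shift = ilog2(umem->page_size);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		mr->mr.map[m]->segs[n].vaddr = page_address(sg_page(sg));
		mr->mr.map[m]->segs[n].length = umem->page_size;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	return &mr->ibmr;
}
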
302 struct qib_mr *mr = to_imr(ibmr); in qib_dereg_mr() local
306 kfree(mr->pages); in qib_dereg_mr()
307 qib_free_lkey(&mr->mr); in qib_dereg_mr()
309 qib_put_mr(&mr->mr); /* will set completion if last */ in qib_dereg_mr()
310 timeout = wait_for_completion_timeout(&mr->mr.comp, in qib_dereg_mr()
313 qib_get_mr(&mr->mr); in qib_dereg_mr()
317 deinit_qib_mregion(&mr->mr); in qib_dereg_mr()
318 if (mr->umem) in qib_dereg_mr()
319 ib_umem_release(mr->umem); in qib_dereg_mr()
320 kfree(mr); in qib_dereg_mr()
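
qib_dereg_mr() (lines 302-320) is where the refcount/completion pair initialized above pays off: the LKEY is freed so no new references can be taken, the creator's reference is dropped, and the caller waits for any in-flight users to drop theirs before tearing the region down. If the wait times out, the reference is re-taken and -EBUSY is returned. A sketch; the timeout value and labels are assumed:

int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	unsigned long timeout;
	int ret = 0;

	kfree(mr->pages);
	qib_free_lkey(&mr->mr);

	qib_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
	if (!timeout) {
		/* someone still holds a reference; undo the put and bail */
		qib_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}
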
335 struct qib_mr *mr; in qib_alloc_mr() local
340 mr = alloc_mr(max_num_sg, pd); in qib_alloc_mr()
341 if (IS_ERR(mr)) in qib_alloc_mr()
342 return (struct ib_mr *)mr; in qib_alloc_mr()
344 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); in qib_alloc_mr()
345 if (!mr->pages) in qib_alloc_mr()
348 return &mr->ibmr; in qib_alloc_mr()
351 qib_dereg_mr(&mr->ibmr); in qib_alloc_mr()
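
qib_alloc_mr() (lines 335-351) backs IB_WR_REG_MR fast registration: it reuses alloc_mr() and additionally allocates the flat mr->pages array that qib_set_page() below fills in. A sketch assuming the 4.x-era ib_alloc_mr prototype and the usual mr_type check:

struct ib_mr *qib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct qib_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages)
		goto err;

	return &mr->ibmr;

err:
	qib_dereg_mr(&mr->ibmr);
	return ERR_PTR(-ENOMEM);
}
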
357 struct qib_mr *mr = to_imr(ibmr); in qib_set_page() local
359 if (unlikely(mr->npages == mr->mr.max_segs)) in qib_set_page()
362 mr->pages[mr->npages++] = addr; in qib_set_page()
371 struct qib_mr *mr = to_imr(ibmr); in qib_map_mr_sg() local
373 mr->npages = 0; in qib_map_mr_sg()
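
qib_set_page() and qib_map_mr_sg() (lines 357-373) are the two halves of the map_mr_sg hook: the core walks the scatterlist and calls back into qib_set_page() for each page address, which appends it to mr->pages until mr.max_segs is reached. A sketch assuming the pre-4.7 ib_sg_to_pages() signature (before the sg_offset parameter was added):

static int qib_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct qib_mr *mr = to_imr(ibmr);

	if (unlikely(mr->npages == mr->mr.max_segs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

int qib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents)
{
	struct qib_mr *mr = to_imr(ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, qib_set_page);
}
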
396 fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); in qib_alloc_fmr()
400 rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages); in qib_alloc_fmr()
408 rval = qib_alloc_lkey(&fmr->mr, 0); in qib_alloc_fmr()
411 fmr->ibfmr.rkey = fmr->mr.lkey; in qib_alloc_fmr()
412 fmr->ibfmr.lkey = fmr->mr.lkey; in qib_alloc_fmr()
417 fmr->mr.access_flags = mr_access_flags; in qib_alloc_fmr()
418 fmr->mr.max_segs = fmr_attr->max_pages; in qib_alloc_fmr()
419 fmr->mr.page_shift = fmr_attr->page_shift; in qib_alloc_fmr()
426 deinit_qib_mregion(&fmr->mr); in qib_alloc_fmr()
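
qib_alloc_fmr() (lines 396-426) mirrors alloc_mr() for the legacy FMR interface: the same over-allocation for the first-level map pointers and the same init/lkey/unwind sequence, plus copying the caller's fmr_attr limits into the region. A compact sketch with the sizing math and labels assumed:

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m, rval = -ENOMEM;

	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	rval = qib_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;

	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;
	return &fmr->ibfmr;

bail_mregion:
	deinit_qib_mregion(&fmr->mr);
bail:
	kfree(fmr);
	return ERR_PTR(rval);
}
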
453 i = atomic_read(&fmr->mr.refcount); in qib_map_phys_fmr()
457 if (list_len > fmr->mr.max_segs) { in qib_map_phys_fmr()
463 fmr->mr.user_base = iova; in qib_map_phys_fmr()
464 fmr->mr.iova = iova; in qib_map_phys_fmr()
465 ps = 1 << fmr->mr.page_shift; in qib_map_phys_fmr()
466 fmr->mr.length = list_len * ps; in qib_map_phys_fmr()
470 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_map_phys_fmr()
471 fmr->mr.map[m]->segs[n].length = ps; in qib_map_phys_fmr()
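
qib_map_phys_fmr() (lines 453-471) re-points an existing FMR at a new page list: the refcount read up front is a cheap "is anyone still using the old mapping" check, the length is derived from the page shift, and the same map[m]->segs[n] fill pattern is reused, this time under the lkey-table lock since this can be called from interrupt context. A sketch; the to_ifmr()/to_idev() helpers, the lkey-table lookup, the refcount threshold, and the wrap logic are assumed from context:

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt = &to_idev(ibfmr->device)->lk_table;
	unsigned long flags;
	int m = 0, n = 0, i;
	u32 ps;

	/* reject a remap while the old mapping may still be in use */
	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs)
		return -EINVAL;

	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
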
499 fmr->mr.user_base = 0; in qib_unmap_fmr()
500 fmr->mr.iova = 0; in qib_unmap_fmr()
501 fmr->mr.length = 0; in qib_unmap_fmr()
519 qib_free_lkey(&fmr->mr); in qib_dealloc_fmr()
520 qib_put_mr(&fmr->mr); /* will set completion if last */ in qib_dealloc_fmr()
521 timeout = wait_for_completion_timeout(&fmr->mr.comp, in qib_dealloc_fmr()
524 qib_get_mr(&fmr->mr); in qib_dealloc_fmr()
528 deinit_qib_mregion(&fmr->mr); in qib_dealloc_fmr()
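
qib_unmap_fmr() (lines 499-501) invalidates the mapping by zeroing the base, iova, and length of each FMR on the list under the lkey-table lock; qib_dealloc_fmr() (lines 519-528) then retires the region with the same put/wait-for-completion/timeout dance as qib_dereg_mr(). A sketch of both; the list iteration, locking details, and timeout value are assumed:

int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	unsigned long timeout;
	int ret = 0;

	qib_free_lkey(&fmr->mr);
	qib_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
	if (!timeout) {
		qib_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}
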
536 struct qib_mregion *mr = container_of(list, struct qib_mregion, list); in mr_rcu_callback() local
538 complete(&mr->comp); in mr_rcu_callback()
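
mr_rcu_callback() (lines 536-538) closes the lifecycle loop: the completion fires only after an RCU grace period, so lockless lkey-table readers that looked the region up under rcu_read_lock() are guaranteed to be finished before qib_dereg_mr()/qib_dealloc_fmr() proceed with teardown. The put side is not part of these matches; it presumably looks something like the hypothetical companion sketched below (atomic_dec_and_test() plus call_rcu() against the qib_mregion's rcu_head):

void mr_rcu_callback(struct rcu_head *list)
{
	struct qib_mregion *mr = container_of(list, struct qib_mregion, list);

	complete(&mr->comp);
}

/*
 * Hypothetical companion, matching the "will set completion if last"
 * comments above; the real helper lives outside these matches.
 */
static inline void qib_put_mr(struct qib_mregion *mr)
{
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		call_rcu(&mr->list, mr_rcu_callback);
}
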