Lines Matching refs:mr — every line referencing the struct qib_mregion pointer mr in the qib driver's key-handling functions (qib_alloc_lkey, qib_free_lkey, qib_lkey_ok, qib_rkey_ok, qib_fast_reg_mr)
49 int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) in qib_alloc_lkey() argument
55 struct qib_ibdev *dev = to_idev(mr->pd->device); in qib_alloc_lkey()
66 qib_get_mr(mr); in qib_alloc_lkey()
67 rcu_assign_pointer(dev->dma_mr, mr); in qib_alloc_lkey()
68 mr->lkey_published = 1; in qib_alloc_lkey()
93 mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | in qib_alloc_lkey()
96 if (mr->lkey == 0) { in qib_alloc_lkey()
97 mr->lkey |= 1 << 8; in qib_alloc_lkey()
100 qib_get_mr(mr); in qib_alloc_lkey()
101 rcu_assign_pointer(rkt->table[r], mr); in qib_alloc_lkey()
102 mr->lkey_published = 1; in qib_alloc_lkey()
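The qib_alloc_lkey() lines above cover two paths: with dma_region set, the region is published as the device-wide unrestricted key (dev->dma_mr, lkey left at 0); the normal path builds an lkey from the table slot r in the high bits and a generation counter shifted above the low byte, reserving the value 0. A minimal user-space sketch of that bit layout, with hypothetical r, lkey_table_size and gen arguments standing in for the driver's rkt state:

#include <stdint.h>
#include <stdio.h>

static uint32_t make_lkey(uint32_t r, uint32_t lkey_table_size, uint32_t gen)
{
	/* Table index in the top bits, generation counter above the low byte. */
	uint32_t lkey = (r << (32 - lkey_table_size)) |
			((((1u << (24 - lkey_table_size)) - 1) & gen) << 8);

	/* LKEY 0 is reserved for the unrestricted (DMA) key, so skip it. */
	if (lkey == 0)
		lkey |= 1 << 8;
	return lkey;
}

int main(void)
{
	printf("slot 5, gen 3 -> lkey 0x%08x\n", make_lkey(5, 16, 3));
	return 0;
}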
117 void qib_free_lkey(struct qib_mregion *mr) in qib_free_lkey() argument
120 u32 lkey = mr->lkey; in qib_free_lkey()
122 struct qib_ibdev *dev = to_idev(mr->pd->device); in qib_free_lkey()
126 if (!mr->lkey_published) in qib_free_lkey()
134 qib_put_mr(mr); in qib_free_lkey()
135 mr->lkey_published = 0; in qib_free_lkey()
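qib_free_lkey() is the inverse of publication: if lkey_published is set, the slot the region occupies (dev->dma_mr for lkey 0, otherwise rkt->table[r]) is cleared, the reference taken at publish time is dropped via qib_put_mr(), and the flag is reset. A simplified sketch of that pairing, using a hypothetical plain-pointer table and integer refcount in place of the driver's RCU-protected table and atomic_t:

#include <stddef.h>

/* Hypothetical, much-reduced stand-in for struct qib_mregion. */
struct region {
	int refcount;          /* atomic_t plus qib_put_mr() in the driver */
	int lkey_published;
	unsigned int lkey;
	unsigned int r;        /* table slot; the driver derives it from lkey's top bits */
};

#define TABLE_SIZE 256
static struct region *lkey_table[TABLE_SIZE];

static void unpublish(struct region *mr)
{
	if (!mr->lkey_published)
		return;
	lkey_table[mr->r] = NULL;  /* rcu_assign_pointer(..., NULL) in the driver */
	mr->refcount--;            /* drop the reference held since publication */
	mr->lkey_published = 0;
}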
158 struct qib_mregion *mr; in qib_lkey_ok() local
172 mr = rcu_dereference(dev->dma_mr); in qib_lkey_ok()
173 if (!mr) in qib_lkey_ok()
175 if (unlikely(!atomic_inc_not_zero(&mr->refcount))) in qib_lkey_ok()
179 isge->mr = mr; in qib_lkey_ok()
187 mr = rcu_dereference( in qib_lkey_ok()
189 if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) in qib_lkey_ok()
192 off = sge->addr - mr->user_base; in qib_lkey_ok()
193 if (unlikely(sge->addr < mr->user_base || in qib_lkey_ok()
194 off + sge->length > mr->length || in qib_lkey_ok()
195 (mr->access_flags & acc) != acc)) in qib_lkey_ok()
197 if (unlikely(!atomic_inc_not_zero(&mr->refcount))) in qib_lkey_ok()
201 off += mr->offset; in qib_lkey_ok()
202 if (mr->page_shift) { in qib_lkey_ok()
210 entries_spanned_by_off = off >> mr->page_shift; in qib_lkey_ok()
211 off -= (entries_spanned_by_off << mr->page_shift); in qib_lkey_ok()
217 while (off >= mr->map[m]->segs[n].length) { in qib_lkey_ok()
218 off -= mr->map[m]->segs[n].length; in qib_lkey_ok()
226 isge->mr = mr; in qib_lkey_ok()
227 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in qib_lkey_ok()
228 isge->length = mr->map[m]->segs[n].length - off; in qib_lkey_ok()
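The qib_lkey_ok() lines perform three steps: lkey 0 is resolved through dev->dma_mr, the requested range and access are checked against user_base, length and access_flags (all requested access bits must be present), and the byte offset is then translated into a (map, segment) pair, either directly via page_shift when segments are uniform pages or by walking segment lengths. A hedged sketch of that last translation step, assuming a hypothetical SEGS_PER_MAP in place of the driver's QIB_SEGSZ:

#include <stddef.h>

#define SEGS_PER_MAP 8 /* stands in for the driver's QIB_SEGSZ */

struct seg { void *vaddr; size_t length; };
struct map { struct seg segs[SEGS_PER_MAP]; };

/* Resolve a byte offset into the region to the segment holding it and
 * return the remaining vaddr/length, mirroring the loop in qib_lkey_ok(). */
static void locate(struct map **maps, size_t off, unsigned int page_shift,
		   void **vaddr, size_t *length)
{
	size_t m = 0, n = 0;

	if (page_shift) {
		/* Uniform page-sized segments: the entry index is simply
		 * off >> page_shift, split into (map, entry). */
		size_t entries = off >> page_shift;

		off -= entries << page_shift;
		m = entries / SEGS_PER_MAP;
		n = entries % SEGS_PER_MAP;
	} else {
		/* General case: skip whole segments until off falls inside one. */
		while (off >= maps[m]->segs[n].length) {
			off -= maps[m]->segs[n].length;
			if (++n == SEGS_PER_MAP) {
				m++;
				n = 0;
			}
		}
	}
	*vaddr = (char *)maps[m]->segs[n].vaddr + off;
	*length = maps[m]->segs[n].length - off;
}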
256 struct qib_mregion *mr; in qib_rkey_ok() local
271 mr = rcu_dereference(dev->dma_mr); in qib_rkey_ok()
272 if (!mr) in qib_rkey_ok()
274 if (unlikely(!atomic_inc_not_zero(&mr->refcount))) in qib_rkey_ok()
278 sge->mr = mr; in qib_rkey_ok()
287 mr = rcu_dereference( in qib_rkey_ok()
289 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) in qib_rkey_ok()
292 off = vaddr - mr->iova; in qib_rkey_ok()
293 if (unlikely(vaddr < mr->iova || off + len > mr->length || in qib_rkey_ok()
294 (mr->access_flags & acc) == 0)) in qib_rkey_ok()
296 if (unlikely(!atomic_inc_not_zero(&mr->refcount))) in qib_rkey_ok()
300 off += mr->offset; in qib_rkey_ok()
301 if (mr->page_shift) { in qib_rkey_ok()
309 entries_spanned_by_off = off >> mr->page_shift; in qib_rkey_ok()
310 off -= (entries_spanned_by_off << mr->page_shift); in qib_rkey_ok()
316 while (off >= mr->map[m]->segs[n].length) { in qib_rkey_ok()
317 off -= mr->map[m]->segs[n].length; in qib_rkey_ok()
325 sge->mr = mr; in qib_rkey_ok()
326 sge->vaddr = mr->map[m]->segs[n].vaddr + off; in qib_rkey_ok()
327 sge->length = mr->map[m]->segs[n].length - off; in qib_rkey_ok()
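qib_rkey_ok() mirrors the lkey path with three differences visible above: the window is validated against mr->iova rather than user_base, the region must belong to the QP's protection domain, and the access test only requires some requested bit to be set ((access_flags & acc) == 0 fails) rather than all of them. A small sketch of just those checks, using a hypothetical simplified region struct:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical region descriptor; the driver keeps these fields in
 * struct qib_mregion. */
struct region {
	uint64_t iova;        /* remote-visible base address */
	size_t length;
	unsigned int lkey;    /* rkeys share the lkey field */
	int access_flags;
	void *pd;             /* owning protection domain */
};

/* Validation mirroring qib_rkey_ok(): key match, PD match, the
 * [vaddr, vaddr + len) window inside the region, and at least one of the
 * requested access bits granted. */
static bool rkey_range_ok(const struct region *mr, unsigned int rkey,
			  const void *qp_pd, uint64_t vaddr, size_t len, int acc)
{
	uint64_t off = vaddr - mr->iova;

	if (mr->lkey != rkey || mr->pd != qp_pd)
		return false;
	if (vaddr < mr->iova || off + len > mr->length)
		return false;
	if ((mr->access_flags & acc) == 0)
		return false;
	return true;
}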
345 struct qib_mregion *mr; in qib_fast_reg_mr() local
357 mr = rcu_dereference_protected( in qib_fast_reg_mr()
360 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) in qib_fast_reg_mr()
363 if (wr->wr.fast_reg.page_list_len > mr->max_segs) in qib_fast_reg_mr()
370 mr->user_base = wr->wr.fast_reg.iova_start; in qib_fast_reg_mr()
371 mr->iova = wr->wr.fast_reg.iova_start; in qib_fast_reg_mr()
372 mr->lkey = rkey; in qib_fast_reg_mr()
373 mr->length = wr->wr.fast_reg.length; in qib_fast_reg_mr()
374 mr->access_flags = wr->wr.fast_reg.access_flags; in qib_fast_reg_mr()
379 mr->map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_fast_reg_mr()
380 mr->map[m]->segs[n].length = ps; in qib_fast_reg_mr()
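Finally, qib_fast_reg_mr() rewrites an already-published region in place from a fast-register work request: new rkey, iova/user_base, length and access flags, after which the work request's page list is copied into map[m]->segs[n], one segment of length ps at a time. A sketch of that copy loop, again with a hypothetical SEGS_PER_MAP standing in for the driver's QIB_SEGSZ:

#include <stdint.h>
#include <stddef.h>

#define SEGS_PER_MAP 8 /* placeholder for the driver's QIB_SEGSZ */

struct seg { void *vaddr; size_t length; };
struct map { struct seg segs[SEGS_PER_MAP]; };

/* Copy a fast-register page list into the region's two-level map: each page
 * address becomes one segment of length ps (the region's page size). */
static void fill_segs(struct map **maps, const uint64_t *page_list,
		      unsigned int page_list_len, size_t ps)
{
	unsigned int m = 0, n = 0, i;

	for (i = 0; i < page_list_len; i++) {
		maps[m]->segs[n].vaddr = (void *)(uintptr_t)page_list[i];
		maps[m]->segs[n].length = ps;
		if (++n == SEGS_PER_MAP) {
			m++;
			n = 0;
		}
	}
}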