Lines matching refs:mr
49 struct mlx5_ib_mr *mr; in mlx5_ib_invalidate_range() local
60 mr = umem->odp_data->private; in mlx5_ib_invalidate_range()
62 if (!mr || !mr->ibmr.pd) in mlx5_ib_invalidate_range()
93 mlx5_ib_update_mtt(mr, blk_start_idx, in mlx5_ib_invalidate_range()
100 mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1, in mlx5_ib_invalidate_range()
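
The two mlx5_ib_update_mtt() call sites above (source lines 93 and 100) are the tail of a coalescing loop: pages of the invalidated range that are still mapped are gathered into contiguous blocks, so one MTT update is issued per block inside the loop and a final one after it for any trailing block. Below is a minimal user-space sketch of that idiom under stated assumptions; zap_block(), mapped[] and main() are hypothetical stand-ins for mlx5_ib_update_mtt() and the ODP DMA list, and the chunking heuristics the driver layers on top are omitted.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for mlx5_ib_update_mtt(mr, blk_start_idx, npages, 1):
 * zap one contiguous run of MTT entries. */
static void zap_block(int blk_start_idx, int npages)
{
	printf("zap MTT entries [%d, %d)\n", blk_start_idx, blk_start_idx + npages);
}

/* Coalesce mapped pages of the invalidated range into contiguous blocks so
 * the expensive MTT update runs once per block rather than once per page. */
static void invalidate_range(const bool *mapped, int npages)
{
	int idx, blk_start_idx = 0;
	bool in_block = false;

	for (idx = 0; idx < npages; idx++) {
		if (mapped[idx]) {
			if (!in_block) {
				blk_start_idx = idx;	/* open a new block */
				in_block = true;
			}
		} else if (in_block) {
			/* A hole closes the current block (cf. line 93). */
			zap_block(blk_start_idx, idx - blk_start_idx);
			in_block = false;
		}
	}
	if (in_block)	/* trailing block flushed after the loop (cf. line 100) */
		zap_block(blk_start_idx, npages - blk_start_idx);
}

int main(void)
{
	const bool mapped[] = { true, true, false, true, true, true };

	invalidate_range(mapped, sizeof(mapped) / sizeof(mapped[0]));
	return 0;
}

The payoff of tracking blk_start_idx is that the expensive per-block update runs once per contiguous run of mapped pages instead of once per page.
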
153 struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr); in mlx5_ib_odp_find_mr_lkey() local
155 if (!mmr || mmr->key != key || !mr->live) in mlx5_ib_odp_find_mr_lkey()
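
Source lines 153-155 show the lookup helper recovering the mlx5_ib_mr wrapper from the embedded mmr member with container_of() and validating it with one short-circuiting test: the mkey must resolve, the stored key must match the requested one, and the MR must be marked live. A hedged user-space sketch of that lookup-and-validate pattern follows; struct core_mr, struct odp_mr, core_lookup() and the toy table are illustrative stand-ins, not the driver's types.

#include <stddef.h>
#include <stdio.h>

/* User-space rendition of the kernel's container_of(): recover the wrapper
 * structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for mlx5_core_mr and mlx5_ib_mr; only the fields
 * exercised by the lookup are modelled. */
struct core_mr {
	unsigned int key;
};

struct odp_mr {
	int live;		/* set once the MR may service page faults */
	struct core_mr mmr;	/* embedded core MR, as in struct mlx5_ib_mr */
};

/* Toy lookup table standing in for the driver's mkey lookup. */
static struct odp_mr mr_table[4];

static struct core_mr *core_lookup(unsigned int key)
{
	struct odp_mr *mr = &mr_table[key % 4];

	return mr->mmr.key ? &mr->mmr : NULL;
}

static struct odp_mr *find_mr_lkey(unsigned int key)
{
	struct core_mr *mmr = core_lookup(key);
	/* container_of() is pure pointer arithmetic, so deriving mr from a
	 * possibly-NULL mmr mirrors the kernel code; mr is only dereferenced
	 * once mmr is known to be non-NULL. */
	struct odp_mr *mr = container_of(mmr, struct odp_mr, mmr);

	/* Short-circuit order matters: mr->live is read only when mmr != NULL,
	 * and only a live MR whose key matches is returned. */
	if (!mmr || mmr->key != key || !mr->live)
		return NULL;

	return mr;
}

int main(void)
{
	mr_table[1] = (struct odp_mr){ .live = 1, .mmr = { .key = 0x11 } };

	printf("lookup 0x11: %s\n", find_mr_lkey(0x11) ? "found" : "miss");
	printf("lookup 0x12: %s\n", find_mr_lkey(0x12) ? "found" : "miss");
	return 0;
}
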
195 struct mlx5_ib_mr *mr; in pagefault_single_data_segment() local
199 mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key); in pagefault_single_data_segment()
205 if (!mr || !mr->ibmr.pd) { in pagefault_single_data_segment()
211 if (!mr->umem->odp_data) { in pagefault_single_data_segment()
219 if (mr->ibmr.pd != qp->ibqp.pd) { in pagefault_single_data_segment()
225 current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq); in pagefault_single_data_segment()
240 start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT; in pagefault_single_data_segment()
242 if (mr->umem->writable) in pagefault_single_data_segment()
244 npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt, in pagefault_single_data_segment()
252 mutex_lock(&mr->umem->odp_data->umem_mutex); in pagefault_single_data_segment()
253 if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) { in pagefault_single_data_segment()
259 ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0); in pagefault_single_data_segment()
263 mutex_unlock(&mr->umem->odp_data->umem_mutex); in pagefault_single_data_segment()
279 if (!mr->umem->odp_data->dying) { in pagefault_single_data_segment()
280 struct ib_umem_odp *odp_data = mr->umem->odp_data; in pagefault_single_data_segment()
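
Source lines 225-263 implement the MMU-notifier race guard around the page-fault path: notifiers_seq is sampled (via ACCESS_ONCE) before the pages are faulted and DMA-mapped, and only after retaking umem_mutex and passing ib_umem_mmu_notifier_retry() are the MTTs actually written; otherwise the work is dropped and the fault retried. The sketch below models that sample/map/recheck idiom in user space; struct odp_umem, notifier_retry() and handle_fault() are assumed names for illustration, not the kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct ib_umem_odp: an invalidation sequence
 * counter plus a count of invalidations currently in flight, which is what
 * ib_umem_mmu_notifier_retry() consults in the real code. */
struct odp_umem {
	pthread_mutex_t umem_mutex;
	unsigned long notifiers_seq;
	unsigned long notifiers_count;
};

/* True if an invalidation ran, or is still running, since 'seq' was sampled. */
static bool notifier_retry(struct odp_umem *umem, unsigned long seq)
{
	return umem->notifiers_count || umem->notifiers_seq != seq;
}

static int handle_fault(struct odp_umem *umem)
{
	/* Sample the sequence number before faulting pages in; the volatile
	 * read mirrors the ACCESS_ONCE() on line 225. */
	unsigned long current_seq = *(volatile unsigned long *)&umem->notifiers_seq;
	int ret = 0;

	/* ... fault and DMA-map the pages here, outside the lock
	 * (ib_umem_odp_map_dma_pages() on line 244) ... */

	pthread_mutex_lock(&umem->umem_mutex);
	if (!notifier_retry(umem, current_seq)) {
		/* No invalidation raced with the mapping: safe to publish the
		 * new translations (mlx5_ib_update_mtt() on line 259). */
		printf("update MTTs\n");
	} else {
		/* A racing invalidation may have zapped what was just mapped;
		 * drop the result and let the caller re-fault. */
		ret = -1;
	}
	pthread_mutex_unlock(&umem->umem_mutex);

	return ret;
}

int main(void)
{
	static struct odp_umem umem = { .umem_mutex = PTHREAD_MUTEX_INITIALIZER };

	return handle_fault(&umem) ? 1 : 0;
}

Rechecking the sequence under the same mutex the invalidation path holds is what makes the recheck meaningful in this sketch (as in the driver): an invalidation that raced with the unlocked mapping step either bumps the sequence before the check or is still counted as in flight, so stale translations are never published.
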