Lines matching refs:mr (references to mr in drivers/infiniband/hw/mlx5/odp.c, grouped by function):
In mlx5_ib_invalidate_range():
     49  struct mlx5_ib_mr *mr;                                      (local declaration)
     60  mr = umem->odp_data->private;
     62  if (!mr || !mr->ibmr.pd)
     93  mlx5_ib_update_mtt(mr, blk_start_idx,
    100  mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
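
The two mlx5_ib_update_mtt() calls at lines 93 and 100 zap runs of device
page-table (MTT) entries: the invalidation loop coalesces contiguous mapped
page indices into a block starting at blk_start_idx and flushes each block
with a single call.  Below is a minimal user-space C sketch of that
coalescing pattern; flush_block() and invalidate_range() are hypothetical
stand-ins for the driver's functions, not its actual code.

    /* Model of the block coalescing done by mlx5_ib_invalidate_range():
     * walk page indices, accumulate contiguous mapped pages into one block,
     * and flush each block with a single call (the driver uses
     * mlx5_ib_update_mtt() with zap semantics for this). */
    #include <stdbool.h>
    #include <stdio.h>

    static void flush_block(unsigned long start_idx, unsigned long npages)
    {
            /* stand-in for mlx5_ib_update_mtt(mr, start_idx, npages, 1) */
            printf("zap MTT entries [%lu, %lu)\n", start_idx, start_idx + npages);
    }

    static void invalidate_range(const bool *mapped, unsigned long nr_pages)
    {
            unsigned long idx, blk_start_idx = 0;
            bool in_block = false;

            for (idx = 0; idx < nr_pages; idx++) {
                    if (mapped[idx]) {
                            if (!in_block) {        /* open a new block */
                                    blk_start_idx = idx;
                                    in_block = true;
                            }
                    } else if (in_block) {          /* gap: flush the run */
                            flush_block(blk_start_idx, idx - blk_start_idx);
                            in_block = false;
                    }
            }
            if (in_block)                           /* flush the trailing run */
                    flush_block(blk_start_idx, nr_pages - blk_start_idx);
    }

    int main(void)
    {
            const bool mapped[] = { true, true, false, true, true, true, false };

            invalidate_range(mapped, sizeof(mapped) / sizeof(mapped[0]));
            return 0;
    }

Coalescing keeps the number of MTT update commands proportional to the number
of contiguous runs rather than to the number of invalidated pages.
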
In mlx5_ib_odp_find_mr_lkey():
    146  struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr);   (local declaration)
    148  if (!mmr || mmr->key != key || !mr->live)
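
Line 146 recovers the driver's MR structure from a pointer to the embedded
core MR via container_of(), so the extra fields tested on line 148 (key and
live) become reachable.  A small self-contained C illustration of the same
pattern follows; core_mr and driver_mr are simplified stand-ins for the real
mlx5 structures.

    #include <stddef.h>
    #include <stdio.h>

    /* Same idea as the kernel macro: subtract the member offset to get
     * back to the enclosing structure. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct core_mr {                /* stand-in for struct mlx5_core_mr */
            unsigned int key;
    };

    struct driver_mr {              /* stand-in for struct mlx5_ib_mr */
            int live;
            struct core_mr mmr;     /* embedded core object */
    };

    int main(void)
    {
            struct driver_mr mr = { .live = 1, .mmr = { .key = 0x1234 } };
            struct core_mr *mmr = &mr.mmr;   /* what a key lookup returns */
            struct driver_mr *found = container_of(mmr, struct driver_mr, mmr);

            printf("key=0x%x live=%d\n", found->mmr.key, found->live);
            return 0;
    }
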
In pagefault_single_data_segment():
    188  struct mlx5_ib_mr *mr;                                      (local declaration)
    192  mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key);
    198  if (!mr || !mr->ibmr.pd) {
    204  if (!mr->umem->odp_data) {
    212  if (mr->ibmr.pd != qp->ibqp.pd) {
    218  current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq);
    233  start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;
    235  if (mr->umem->writable)
    237  npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
    245  mutex_lock(&mr->umem->odp_data->umem_mutex);
    246  if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
    252  ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
    256  mutex_unlock(&mr->umem->odp_data->umem_mutex);
    272  if (!mr->umem->odp_data->dying) {
    273  struct ib_umem_odp *odp_data = mr->umem->odp_data;
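
Lines 218, 237 and 245-256 are the page-fault side of the MMU-notifier race
protocol: the notifier sequence number is sampled before the pages are
DMA-mapped, and the MTT is only updated under umem_mutex if
ib_umem_mmu_notifier_retry() confirms no invalidation ran in the meantime.
The stand-alone model below shows just that check; odp_umem, notifier_retry()
and handle_pagefault() are assumed names and a pthread mutex replaces the
kernel locking, so this is a sketch of the idea rather than the driver's code.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct odp_umem {
            pthread_mutex_t umem_mutex;
            unsigned long notifiers_seq;    /* bumped by each invalidation */
    };

    /* True if an invalidation ran since current_seq was sampled. */
    static bool notifier_retry(struct odp_umem *umem, unsigned long current_seq)
    {
            return umem->notifiers_seq != current_seq;
    }

    static int handle_pagefault(struct odp_umem *umem)
    {
            unsigned long current_seq = umem->notifiers_seq;  /* sample first */
            int ret = 0;

            /* ... map the faulting pages here (ib_umem_odp_map_dma_pages) ... */

            pthread_mutex_lock(&umem->umem_mutex);
            if (!notifier_retry(umem, current_seq)) {
                    /* safe to point the HW page table at the new mappings
                     * (mlx5_ib_update_mtt() in the driver) */
                    printf("MTT updated\n");
            } else {
                    ret = -1;               /* caller retries the fault */
                    printf("raced with an invalidation, retrying\n");
            }
            pthread_mutex_unlock(&umem->umem_mutex);
            return ret;
    }

    int main(void)
    {
            struct odp_umem umem = { PTHREAD_MUTEX_INITIALIZER, 0 };

            handle_pagefault(&umem);
            return 0;
    }

Sampling the sequence before mapping and re-checking it under the lock avoids
holding umem_mutex across the page-mapping step, which can sleep.
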