Lines matching refs: odp (all hits below are in drivers/infiniband/hw/mlx5/odp.c)

161	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);	in populate_mtt() local
169	pa = odp->dma_list[idx + i];	in populate_mtt()
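The two populate_mtt() hits show the ODP translation path: the MR's umem is downcast to an ib_umem_odp and each translation entry is pulled from its per-page dma_list. A minimal sketch of that loop shape follows; it is not the verbatim upstream body, 'pas' and 'nentries' are illustrative names, and the real code also folds MTT permission bits into each entry.

	/*
	 * Hedged sketch of the loop implied by lines 161/169;
	 * 'pas'/'nentries' are illustrative, not the upstream signature.
	 */
	static void populate_mtt_sketch(__be64 *pas, size_t idx,
					size_t nentries, struct mlx5_ib_mr *mr)
	{
		struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
		size_t i;

		for (i = 0; i < nentries; i++) {
			dma_addr_t pa = odp->dma_list[idx + i];

			/* upstream also ORs MTT permission bits in here */
			pas[i] = cpu_to_be64(pa);
		}
	}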
196	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);	in free_implicit_child_mr_work() local
202	ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,	in free_implicit_child_mr_work()
212	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);	in destroy_unused_implicit_child_mr() local
213	unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;	in destroy_unused_implicit_child_mr()
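Lines 202 and 213 both derive an implicit-ODP child's slot from its starting VA. Assuming the upstream definition where MLX5_IMR_MTT_SHIFT works out to 30 (each child covers 1 GiB of the parent implicit MR's address space), the index is just the child's start address divided by the child size:

	/*
	 * Sketch of the index math on lines 202/213, assuming
	 * MLX5_IMR_MTT_SHIFT == 30 (1 GiB per implicit child).
	 */
	static unsigned long imr_child_idx(struct ib_umem_odp *odp)
	{
		/* e.g. a child whose range starts at 4 GiB lands in slot 4 */
		return ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
	}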
409	struct ib_umem_odp *odp;	in implicit_get_child_mr() local
414	odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),	in implicit_get_child_mr()
417	if (IS_ERR(odp))	in implicit_get_child_mr()
418	return ERR_CAST(odp);	in implicit_get_child_mr()
424	ib_umem_odp_release(odp);	in implicit_get_child_mr()
431	mr->umem = &odp->umem;	in implicit_get_child_mr()
436	odp->private = mr;	in implicit_get_child_mr()
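The implicit_get_child_mr() cluster shows the allocate-then-unwind pattern: ib_umem_odp_alloc_child() returns an ERR_PTR on failure (propagated with ERR_CAST()), and any later failure must release the child umem with ib_umem_odp_release(). A condensed sketch of that control flow; the error labels are flattened, alloc_child_mr() is a hypothetical stand-in for the driver's MR-cache allocation, and mlx5_mn_ops is the driver's mmu_interval_notifier_ops in odp.c.

	static struct mlx5_ib_mr *get_child_sketch(struct mlx5_ib_mr *imr,
						   unsigned long idx)
	{
		struct ib_umem_odp *odp;
		struct mlx5_ib_mr *mr;

		odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
					      idx * MLX5_IMR_MTT_SIZE,
					      MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
		if (IS_ERR(odp))
			return ERR_CAST(odp);	/* propagate the umem error */

		mr = alloc_child_mr(imr);	/* hypothetical helper */
		if (IS_ERR(mr)) {
			ib_umem_odp_release(odp);	/* unwind on failure */
			return mr;
		}

		mr->umem = &odp->umem;	/* child MR owns the child umem */
		odp->private = mr;	/* back-pointer used at invalidation */
		return mr;
	}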
552	static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,	in pagefault_real_mr() argument
566	page_shift = odp->page_shift;	in pagefault_real_mr()
567	start_idx = (user_va - ib_umem_start(odp)) >> page_shift;	in pagefault_real_mr()
570	if (odp->umem.writable && !downgrade)	in pagefault_real_mr()
573	np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);	in pagefault_real_mr()
582	mutex_unlock(&odp->umem_mutex);	in pagefault_real_mr()
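pagefault_real_mr() converts the faulting VA into a page index within the umem, requests write access only when the umem is writable and the fault is not a downgrade, and then maps and pins pages via ib_umem_odp_map_dma_and_lock(), which returns with odp->umem_mutex held on success (hence the later mutex_unlock()). A sketch of that sequence, with the device MKEY/MTT update elided; the ODP_*_ALLOWED_BIT flags are from the ib_umem_odp API of the same era as this listing (the one that still exposes dma_list).

	static int pagefault_real_mr_sketch(struct mlx5_ib_mr *mr,
					    struct ib_umem_odp *odp,
					    u64 user_va, size_t bcnt,
					    bool downgrade, bool fault)
	{
		unsigned int page_shift = odp->page_shift;
		u64 start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
		u64 access_mask = ODP_READ_ALLOWED_BIT;
		int np;

		if (odp->umem.writable && !downgrade)
			access_mask |= ODP_WRITE_ALLOWED_BIT;

		np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt,
						  access_mask, fault);
		if (np < 0)
			return np;

		/* ... program the NIC MTTs for [start_idx, start_idx + np) ... */

		mutex_unlock(&odp->umem_mutex);	/* taken by map_dma_and_lock() */
		return np;
	}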
740	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);	in pagefault_mr() local
748	if (!odp->is_implicit_odp) {	in pagefault_mr()
752	(u64)odp->umem.address, &user_va))	in pagefault_mr()
754	if (unlikely(user_va >= ib_umem_end(odp) ||	in pagefault_mr()
755	ib_umem_end(odp) - user_va < bcnt))	in pagefault_mr()
757	return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,	in pagefault_mr()
760	return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,	in pagefault_mr()
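The pagefault_mr() hits show the dispatch between the two MR flavors plus an overflow-safe bounds check. Line 752 is the tail of what upstream is a check_add_overflow() call building user_va from io_virt and the umem base address; lines 754-755 then avoid testing 'user_va + bcnt > ib_umem_end(odp)', which could wrap, and compare the remaining length instead. A sketch of just that check, with fault_in_range() as a hypothetical helper name:

	/*
	 * Overflow-safe range check from lines 754-755: 'user_va + bcnt'
	 * could wrap, so compare against the remaining length instead.
	 */
	static bool fault_in_range(struct ib_umem_odp *odp, u64 user_va,
				   size_t bcnt)
	{
		if (user_va >= ib_umem_end(odp))
			return false;
		return ib_umem_end(odp) - user_va >= bcnt;
	}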