Lines matching references to bcnt (drivers/infiniband/hw/mlx5/odp.c, mlx5 on-demand-paging page-fault path)
553 u64 user_va, size_t bcnt, u32 *bytes_mapped, in pagefault_real_mr() argument
573 np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault); in pagefault_real_mr()
595 *bytes_mapped += min_t(u32, new_mappings, bcnt); in pagefault_real_mr()
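
Context for the hits above: pagefault_real_mr() converts the np pages returned by ib_umem_odp_map_dma_and_lock() (line 573) into a byte credit at line 595, discounting the offset of user_va inside its first page and clamping the result to the requested bcnt. A minimal userspace sketch of that accounting; the 4 KiB page size and the sample values are assumptions, and round_down() is reimplemented here rather than taken from kernel headers:

#include <stdint.h>
#include <stdio.h>

/* round_down() for a power-of-two alignment, as the kernel macro behaves. */
#define round_down(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	unsigned int page_shift = 12;   /* assumed 4 KiB pages */
	uint64_t user_va = 0x10080;     /* faulting VA, offset 0x80 into its page */
	uint64_t bcnt = 0x500;          /* bytes the caller asked to map */
	int np = 2;                     /* pages the fault handler mapped */
	uint32_t bytes_mapped = 0;

	/* Usable bytes from user_va to the end of the np mapped pages. */
	uint64_t new_mappings = ((uint64_t)np << page_shift) -
		(user_va - round_down(user_va, 1ULL << page_shift));

	/* min_t(u32, new_mappings, bcnt): never credit more than requested. */
	bytes_mapped += new_mappings < bcnt ? (uint32_t)new_mappings
					    : (uint32_t)bcnt;

	printf("new_mappings=0x%llx credited=0x%x\n",
	       (unsigned long long)new_mappings, bytes_mapped);
	return 0;
}
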
606 size_t bcnt, u32 *bytes_mapped, u32 flags) in pagefault_implicit_mr() argument
608 unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT; in pagefault_implicit_mr()
616 mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt)) in pagefault_implicit_mr()
620 while (bcnt) { in pagefault_implicit_mr()
643 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) - in pagefault_implicit_mr()
654 bcnt -= len; in pagefault_implicit_mr()
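
The implicit-MR hits show a chunking loop: pagefault_implicit_mr() bounds-checks the request against the KSM table (line 616), then walks [user_va, user_va + bcnt) one child MTT region at a time, clipping each pass at ib_umem_end() (line 643) and decrementing bcnt by the clipped length (line 654). A hedged userspace model of that walk; MTT_SHIFT is a placeholder value, not the real MLX5_IMR_MTT_SHIFT, and umem_end() assumes each child umem spans exactly one region:

#include <stdint.h>
#include <stdio.h>

#define MTT_SHIFT 25                    /* placeholder for MLX5_IMR_MTT_SHIFT */
#define MTT_SIZE  (1ULL << MTT_SHIFT)   /* placeholder for MLX5_IMR_MTT_SIZE */

/* In the driver the end comes from ib_umem_end(umem_odp); here each
 * child umem is assumed to cover exactly one MTT_SIZE region. */
static uint64_t umem_end(uint64_t va)
{
	return (va & ~(MTT_SIZE - 1)) + MTT_SIZE;
}

int main(void)
{
	uint64_t user_va = 3 * MTT_SIZE - 0x1000;  /* straddles a boundary */
	uint64_t bcnt = 0x3000;

	while (bcnt) {
		/* Clip this pass at the child umem's end, as at line 643. */
		uint64_t end = user_va + bcnt < umem_end(user_va)
				       ? user_va + bcnt : umem_end(user_va);
		uint64_t len = end - user_va;

		printf("fault child: va=0x%llx len=0x%llx\n",
		       (unsigned long long)user_va, (unsigned long long)len);

		user_va += len;
		bcnt -= len;            /* mirrors line 654 */
	}
	return 0;
}
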
690 static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt, in pagefault_dmabuf_mr() argument
723 *bytes_mapped += bcnt; in pagefault_dmabuf_mr()
737 static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, in pagefault_mr() argument
746 return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags); in pagefault_mr()
755 ib_umem_end(odp) - user_va < bcnt)) in pagefault_mr()
757 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped, in pagefault_mr()
760 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped, in pagefault_mr()
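
pagefault_mr() dispatches to a backend: the dmabuf path (line 746) maps the MR all-or-nothing, which is why line 723 credits the entire bcnt, while the ODP paths range-check first. Note that both checks (lines 616 and 755) are written as end - va < bcnt rather than va + bcnt > end, so an enormous bcnt cannot wrap the addition and slip past the bound. A small self-contained demonstration of the difference, using hypothetical bounds:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Overflow-safe form, mirroring "ib_umem_end(odp) - user_va < bcnt". */
static bool in_range_safe(uint64_t va, uint64_t bcnt,
			  uint64_t start, uint64_t end)
{
	return va >= start && va <= end && end - va >= bcnt;
}

/* Naive form: the addition va + bcnt can wrap and pass the check. */
static bool in_range_naive(uint64_t va, uint64_t bcnt,
			   uint64_t start, uint64_t end)
{
	return va >= start && va + bcnt <= end;
}

int main(void)
{
	uint64_t start = 0x1000, end = 0x2000, va = 0x1800;
	uint64_t huge = UINT64_MAX - 0x100;   /* makes va + huge wrap */

	printf("safe : %d\n", in_range_safe(va, huge, start, end));  /* 0 */
	printf("naive: %d\n", in_range_naive(va, huge, start, end)); /* 1: bogus pass */
	return 0;
}
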
788 size_t bcnt; member
815 u64 io_virt, size_t bcnt, in pagefault_single_data_segment() argument
828 bcnt -= *bytes_committed; in pagefault_single_data_segment()
840 *bytes_mapped += bcnt; in pagefault_single_data_segment()
862 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0); in pagefault_single_data_segment()
903 for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) { in pagefault_single_data_segment()
917 frame->bcnt = min_t(size_t, bcnt, in pagefault_single_data_segment()
923 bcnt -= frame->bcnt; in pagefault_single_data_segment()
940 bcnt = frame->bcnt; in pagefault_single_data_segment()
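
In pagefault_single_data_segment(), a direct MR is faulted in one call (line 862); an indirect KLM/KSM key instead splits the outstanding bcnt across its pklm entries (lines 903-923), pushing one pf_frame per child range and popping them back at line 940. A simplified userspace model of the split; the frame fields follow the member hit at line 788, the KLM entries are hypothetical, and the child io_virt is kept as a running offset here where the driver derives it from each entry's va field:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed to the byte-accounting fields relevant to this listing. */
struct pf_frame {
	uint32_t key;
	uint64_t io_virt;
	size_t bcnt;
};

int main(void)
{
	/* Three hypothetical KLM entries: (child key, byte length). */
	struct { uint32_t key; size_t len; } pklm[] = {
		{ 0x100, 0x1000 }, { 0x200, 0x2000 }, { 0x300, 0x4000 },
	};
	size_t bcnt = 0x2800;           /* spans the first two entries */
	uint64_t io_virt = 0;
	struct pf_frame frames[3];
	int i, nframes = 0;

	for (i = 0; bcnt && i < 3; i++) {
		/* Each frame takes at most what this KLM entry covers,
		 * mirroring the min_t() at line 917. */
		size_t take = bcnt < pklm[i].len ? bcnt : pklm[i].len;

		frames[nframes++] = (struct pf_frame){
			.key = pklm[i].key, .io_virt = io_virt, .bcnt = take,
		};
		io_virt += take;
		bcnt -= take;           /* mirrors line 923 */
	}

	for (i = 0; i < nframes; i++)   /* "pop" pass, as at line 940 */
		printf("frame: key=0x%x io_virt=0x%llx bcnt=0x%zx\n",
		       frames[i].key,
		       (unsigned long long)frames[i].io_virt,
		       frames[i].bcnt);
	return 0;
}
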
991 size_t bcnt; in pagefault_data_segments() local
1006 bcnt = byte_count & ~MLX5_INLINE_SEG; in pagefault_data_segments()
1009 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK; in pagefault_data_segments()
1010 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt, in pagefault_data_segments()
1017 if (receive_queue && bcnt == 0 && in pagefault_data_segments()
1023 *total_wqe_bytes += bcnt - min_t(size_t, bcnt, in pagefault_data_segments()
1028 if (bcnt == 0) in pagefault_data_segments()
1029 bcnt = 1U << 31; in pagefault_data_segments()
1031 if (inline_segment || bcnt <= pfault->bytes_committed) { in pagefault_data_segments()
1033 min_t(size_t, bcnt, in pagefault_data_segments()
1039 io_virt, bcnt, in pagefault_data_segments()
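
Finally, pagefault_data_segments() decodes each scatter entry's byte_count: the top bit marks an inline segment (masked down at line 1009 with MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK), a zero count is widened to 2^31 meaning "rest of the message" (line 1029), and entries that are inline or already covered by bytes_committed are skipped (line 1031). A sketch of that decode under stated assumptions; INLINE_SEG and INLINE_MASK are stand-in values, not the real MLX5_INLINE_SEG and mask constants:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define INLINE_SEG   0x80000000u  /* stand-in for MLX5_INLINE_SEG */
#define INLINE_MASK  0x3ffu       /* stand-in for the inline byte-count mask */

/* Decode one scatter entry's byte_count the way the loop at line 1006
 * does. Returns the bcnt to fault on, or 0 when the entry needs no page
 * fault (inline data, or fully covered by committed bytes). */
static uint64_t decode_bcnt(uint32_t byte_count, size_t bytes_committed)
{
	int inline_segment = !!(byte_count & INLINE_SEG);
	uint64_t bcnt = byte_count & ~INLINE_SEG;

	if (inline_segment)
		bcnt &= INLINE_MASK;

	/* Zero length means "rest of the message": widen to 2^31,
	 * as at line 1029. */
	if (bcnt == 0)
		bcnt = 1U << 31;

	if (inline_segment || bcnt <= bytes_committed)
		return 0;             /* skip, as at line 1031 */

	return bcnt;                  /* committed bytes are subtracted later,
					 inside pagefault_single_data_segment() */
}

int main(void)
{
	printf("plain : 0x%llx\n", (unsigned long long)decode_bcnt(0x1000, 0));
	printf("zero  : 0x%llx\n", (unsigned long long)decode_bcnt(0, 0));
	printf("inline: 0x%llx\n",
	       (unsigned long long)decode_bcnt(INLINE_SEG | 0x40, 0));
	return 0;
}
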