/drivers/net/ethernet/mellanox/mlx5/core/en/
params.c
      47  u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);  in mlx5e_mpwrq_umr_mode() (local)
      52  oversized = xsk->chunk_size < (1 << page_shift);  in mlx5e_mpwrq_umr_mode()
      53  WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));  in mlx5e_mpwrq_umr_mode()
     133  pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;  in mlx5e_mpwrq_pages_per_wqe()
     329  u8 page_shift,  in mlx5e_verify_rx_mpwqe_strides() (argument)
     360  page_shift, umr_mode);  in mlx5e_verify_params_rx_mpwqe_strides()
     385  log_num_strides, page_shift,  in mlx5e_rx_mpwqe_is_linear_skb()
     394  u8 log_pkts_per_wqe, page_shift, max_log_rq_size;  in mlx5e_mpwqe_get_log_rq_size() (local)
     397  page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);  in mlx5e_mpwqe_get_log_rq_size()
     545  page_shift, umr_mode);  in mlx5e_mpwrq_validate_xsk()
    [all …]
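The match at params.c:133 is the heart of the MPWRQ sizing logic: sizes are kept in log2 form so page counts fall out of shift arithmetic. A minimal userspace sketch of that computation (the standalone function and values are illustrative, not the driver's API):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the log2 arithmetic at params.c:133: a multi-packet WQE of
     * (1 << log_wqe_sz) bytes needs (1 << (log_wqe_sz - page_shift)) pages,
     * clamped to one page when the WQE fits inside a single page. */
    static uint32_t pages_per_wqe(uint8_t log_wqe_sz, uint8_t page_shift)
    {
        return log_wqe_sz > page_shift ? 1U << (log_wqe_sz - page_shift) : 1;
    }

    int main(void)
    {
        /* A 256 KiB WQE over 4 KiB pages spans 64 pages. */
        printf("%u\n", pages_per_wqe(18, 12)); /* prints 64 */
        return 0;
    }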
|
params.h
      62  u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
      64  u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
      66  u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
      68  u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
      70  u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
      74  u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
|
/drivers/infiniband/hw/hns/
hns_roce_alloc.c
      67  u32 page_shift, u32 flags)  in hns_roce_buf_alloc() (argument)
      76  if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))  in hns_roce_buf_alloc()
      84  buf->page_shift = page_shift;  in hns_roce_buf_alloc()
      85  page_size = 1 << buf->page_shift;  in hns_roce_buf_alloc()
     134  unsigned int page_shift)  in hns_roce_get_kmem_bufs() (argument)
     140  if (page_shift > buf->trunk_shift) {  in hns_roce_get_kmem_bufs()
     142  page_shift, buf->trunk_shift);  in hns_roce_get_kmem_bufs()
     150  offset += (1 << page_shift);  in hns_roce_get_kmem_bufs()
     157  unsigned int page_shift)  in hns_roce_get_umem_bufs() (argument)
     163  rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {  in hns_roce_get_umem_bufs()
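The hns_roce_get_kmem_bufs() matches show the usual collection loop: one DMA address per (1 << page_shift)-byte block, with the shift rejected when it exceeds the allocation granularity. A hedged sketch (the names and the trunk notion are simplified from the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the walk in hns_roce_get_kmem_bufs(): collect one address per
     * (1 << page_shift)-byte block, refusing shifts larger than the
     * allocation granularity (trunk_shift; illustrative names). */
    static int collect_pages(uint64_t base, uint64_t len,
                             unsigned int page_shift, unsigned int trunk_shift,
                             uint64_t *pages, int max_pages)
    {
        uint64_t offset;
        int n = 0;

        if (page_shift > trunk_shift)
            return -1; /* a "page" must not span two trunks */

        for (offset = 0; offset < len && n < max_pages;
             offset += 1ULL << page_shift)
            pages[n++] = base + offset;

        return n;
    }

    int main(void)
    {
        uint64_t pages[8];
        int n = collect_pages(0x100000, 0x4000, 12, 16, pages, 8);

        printf("%d pages, first %#llx\n", n,
               (unsigned long long)pages[0]); /* 4 pages, first 0x100000 */
        return 0;
    }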
|
hns_roce_mr.c
     100  buf_attr.page_shift = is_fast ? PAGE_SHIFT :  in alloc_mr_pbl()
     556  unsigned int page_shift)  in mtr_check_direct_pages() (argument)
     558  size_t page_size = 1 << page_shift;  in mtr_check_direct_pages()
     604  buf_attr->page_shift,  in mtr_alloc_bufs()
     645  unsigned int page_shift;  in mtr_map_bufs() (local)
     659  mtr->umem, page_shift);  in mtr_map_bufs()
     662  mtr->kmem, page_shift);  in mtr_map_bufs()
     839  buf_attr->page_shift = order_base_2(page_sz);  in get_best_page_shift()
     896  attr->region_count, attr->page_shift);  in is_buf_attr_valid()
     928  buf_pg_sz = 1 << attr->page_shift;  in mtr_init_buf_cfg()
    [all …]
|
hns_roce_trace.h
     152  __field(unsigned int, page_shift)
     163  __entry->page_shift = attr->page_shift;
     168  __entry->region_count, __entry->page_shift,
|
hns_roce_device.h
     293  unsigned int page_shift; /* buffer page shift */  (member)
     384  unsigned int page_shift;  (member)
    1119  return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);  in hns_roce_buf_page()
    1129  static inline u32 to_hr_hw_page_shift(u32 page_shift)  in to_hr_hw_page_shift() (argument)
    1131  return page_shift - HNS_HW_PAGE_SHIFT;  in to_hr_hw_page_shift()
    1189  unsigned int page_shift, struct ib_udata *udata,
    1237  u32 page_shift, u32 flags);
    1241  unsigned int page_shift);
    1244  unsigned int page_shift);
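hns_roce_buf_page() at line 1119 turns a page index into a DMA address with a single shift. A minimal sketch, with an invented struct and a 64 KiB page chosen for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of hns_roce_buf_page() at hns_roce_device.h:1119: with a
     * per-buffer page_shift, the address of page idx is the base plus
     * (idx << page_shift). */
    struct buf {
        uint64_t dma_base;
        unsigned int page_shift;
    };

    static uint64_t buf_page_dma(const struct buf *buf, uint32_t idx)
    {
        return buf->dma_base + ((uint64_t)idx << buf->page_shift);
    }

    int main(void)
    {
        struct buf b = { .dma_base = 0x200000, .page_shift = 16 };

        printf("%#llx\n", (unsigned long long)buf_page_dma(&b, 3)); /* 0x230000 */
        return 0;
    }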
|
/drivers/infiniband/core/
umem_odp.c
      62  size_t page_size = 1UL << umem_odp->page_shift;  in ib_init_umem_odp()
     104  1 << umem_odp->page_shift);  in ib_init_umem_odp()
     150  umem_odp->page_shift = PAGE_SHIFT;  in ib_umem_odp_alloc_implicit()
     193  odp_data->page_shift = PAGE_SHIFT;  in ib_umem_odp_alloc_child()
     255  umem_odp->page_shift = PAGE_SHIFT;  in ib_umem_odp_get()
     258  umem_odp->page_shift = HPAGE_SHIFT;  in ib_umem_odp_get()
     331  unsigned int page_shift, hmm_order, pfn_start_idx;  in ib_umem_odp_map_dma_and_lock() (local)
     340  page_shift = umem_odp->page_shift;  in ib_umem_odp_map_dma_and_lock()
     409  if (hmm_order + PAGE_SHIFT < page_shift) {  in ib_umem_odp_map_dma_and_lock()
     413  __func__, hmm_order, page_shift);  in ib_umem_odp_map_dma_and_lock()
    [all …]
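Lines 255-258 and 409 show the contract: an ODP umem's page_shift is PAGE_SHIFT, or HPAGE_SHIFT for huge-page MRs, and a faulted folio must be at least that large. A sketch of the order check (PAGE_SHIFT fixed at 12 here purely for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the check at umem_odp.c:409: an ODP MR registered with a
     * large page_shift can only map CPU folios at least that big. hmm_order
     * stands for the folio order reported by the fault handler. */
    #define PAGE_SHIFT 12

    static bool folio_big_enough(unsigned int hmm_order, unsigned int page_shift)
    {
        /* folio bytes = 1 << (hmm_order + PAGE_SHIFT); must cover one MR page */
        return hmm_order + PAGE_SHIFT >= page_shift;
    }

    int main(void)
    {
        /* A 2 MiB MR page (shift 21) needs at least an order-9 folio. */
        printf("%d %d\n", folio_big_enough(9, 21),  /* 1: exactly 2 MiB */
                          folio_big_enough(0, 21)); /* 0: 4 KiB is too small */
        return 0;
    }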
|
/drivers/pci/endpoint/
pci-epc-mem.c
      26  unsigned int page_shift = ilog2(mem->window.page_size);  in pci_epc_mem_get_order() (local)
      29  size >>= page_shift;  in pci_epc_mem_get_order()
      53  unsigned int page_shift;  in pci_epc_multi_mem_init() (local)
      73  page_shift = ilog2(page_size);  in pci_epc_multi_mem_init()
      74  pages = windows[i].size >> page_shift;  in pci_epc_multi_mem_init()
     183  unsigned int page_shift;  in pci_epc_mem_alloc_addr() (local)
     201  page_shift = ilog2(mem->window.page_size);  in pci_epc_mem_alloc_addr()
     203  ((phys_addr_t)pageno << page_shift);  in pci_epc_mem_alloc_addr()
     251  unsigned int page_shift;  in pci_epc_mem_free_addr() (local)
     263  page_shift = ilog2(page_size);  in pci_epc_mem_free_addr()
    [all …]
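The endpoint memory code reduces the window's power-of-two page size to a shift once with ilog2(), then every size and page-number conversion is a shift rather than a divide or multiply. A sketch with a portable ilog2 stand-in (window values invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable stand-in for the kernel's ilog2() on a power-of-two value. */
    static unsigned int ilog2_u64(uint64_t v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        uint64_t window_base = 0x80000000, window_size = 0x100000;
        uint64_t page_size = 0x1000;
        unsigned int page_shift = ilog2_u64(page_size);
        uint64_t pages = window_size >> page_shift; /* 256 pages */
        uint64_t pageno = 5;
        uint64_t addr = window_base + (pageno << page_shift);

        printf("%llu pages, page 5 at %#llx\n",
               (unsigned long long)pages, (unsigned long long)addr);
        return 0;
    }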
|
/drivers/infiniband/hw/mlx5/
umr.c
     606  unsigned int page_shift)  in mlx5r_umr_set_update_xlt_mkey_seg() (argument)
     843  int page_shift, int flags)  in mlx5r_umr_update_xlt() (argument)
     935  unsigned int page_shift,  in mlx5r_umr_update_mr_page_shift() (argument)
     964  mr->page_shift = page_shift;  in mlx5r_umr_update_mr_page_shift()
     995  unsigned int page_shift,  in _mlx5r_umr_zap_mkey() (argument)
    1024  1UL << page_shift);  in _mlx5r_umr_zap_mkey()
    1036  mr->page_shift = max_page_shift;  in _mlx5r_umr_zap_mkey()
    1039  mr->page_shift = old_page_shift;  in _mlx5r_umr_zap_mkey()
    1082  unsigned int page_shift)  in mlx5r_umr_dmabuf_update_pgsz() (argument)
    1096  mr->page_shift = page_shift;  in mlx5r_umr_dmabuf_update_pgsz()
    [all …]
|
umr.h
     106  int page_shift, int flags);
     108  unsigned int page_shift,
     111  unsigned int page_shift);
|
odp.c
     699  int page_shift, ret, np;  in pagefault_real_mr() (local)
     712  page_shift = odp->page_shift;  in pagefault_real_mr()
     737  u32 new_mappings = (np << page_shift) -  in pagefault_real_mr()
     743  return np << (page_shift - PAGE_SHIFT);  in pagefault_real_mr()
     841  unsigned int old_page_shift = mr->page_shift;  in pagefault_dmabuf_mr() (local)
     842  unsigned int page_shift;  in pagefault_dmabuf_mr() (local)
     862  page_shift = order_base_2(page_size);  in pagefault_dmabuf_mr()
     863  if (page_shift != mr->page_shift && mr->dmabuf_faulted) {  in pagefault_dmabuf_mr()
     865  page_shift);  in pagefault_dmabuf_mr()
     867  mr->page_shift = page_shift;  in pagefault_dmabuf_mr()
    [all …]
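Line 743 rescales the handler's page count into system pages by shifting by the difference of the two page shifts. A sketch (PAGE_SHIFT assumed 12 for illustration):

    #include <stdio.h>

    /* Sketch of the conversion at odp.c:743: the fault handler maps np pages
     * of (1 << page_shift) bytes, but its caller counts system pages, so the
     * result is rescaled by the difference of the two shifts. */
    #define PAGE_SHIFT 12

    static int mr_pages_to_system_pages(int np, int page_shift)
    {
        return np << (page_shift - PAGE_SHIFT);
    }

    int main(void)
    {
        /* 3 mapped 2 MiB pages (shift 21) are 1536 system pages. */
        printf("%d\n", mr_pages_to_system_pages(3, 21));
        return 0;
    }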
|
mr.c
    1103  u64 page_size = 1ULL << page_shift;  in get_octo_len()
    1189  mr->page_shift = order_base_2(page_size);  in alloc_cacheable_mr()
    1281  mr->page_shift = order_base_2(page_size);  in reg_create()
    1325  get_octo_len(iova, umem->length, mr->page_shift));  in reg_create()
    1326  MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);  in reg_create()
    1860  mr->page_shift = order_base_2(page_size);  in umr_rereg_pas()
    2283  int access_mode, int page_shift)  in mlx5_set_umr_free_mkey() (argument)
    2297  MLX5_SET(mkc, mkc, log_page_size, page_shift);  in mlx5_set_umr_free_mkey()
    2304  int ndescs, int desc_size, int page_shift,  in _mlx5_alloc_mkey_descs() (argument)
    2341  int page_shift = 0;  in mlx5_ib_alloc_pi_mr() (local)
    [all …]
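order_base_2() at lines 1189/1281/1860 converts the selected page size into the shift written to the mkey's log_page_size field. A sketch of that helper's ceiling-log2 behaviour (the loop implementation is illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* order_base_2() is a ceiling log2, so for the power-of-two page sizes
     * the driver picks it is exactly log2. */
    static unsigned int order_base_2_u64(uint64_t n)
    {
        unsigned int r = 0;

        while ((1ULL << r) < n)
            r++;
        return r;
    }

    int main(void)
    {
        printf("%u %u\n", order_base_2_u64(4096),             /* 12 */
                          order_base_2_u64(2 * 1024 * 1024)); /* 21 */
        return 0;
    }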
|
cq.c
     923  cq->buf.frag_buf.page_shift -  in create_cq_kernel()
    1292  unsigned int page_shift;  in mlx5_ib_resize_cq() (local)
    1334  page_shift = order_base_2(page_size);  in mlx5_ib_resize_cq()
    1344  page_shift = frag_buf->page_shift;  in mlx5_ib_resize_cq()
    1358  mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,  in mlx5_ib_resize_cq()
    1372  page_shift - MLX5_ADAPTER_PAGE_SHIFT);  in mlx5_ib_resize_cq()
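Line 1372 encodes the buffer's page size for hardware as page_shift minus the adapter's base shift (4 KiB adapter pages). A sketch of the encoding:

    #include <stdio.h>

    /* Sketch of the encoding at cq.c:1372: mlx5 hardware expresses a queue's
     * page size as log2(page_size) relative to its 4 KiB base page
     * (MLX5_ADAPTER_PAGE_SHIFT). */
    #define ADAPTER_PAGE_SHIFT 12

    static unsigned int hw_log_page_size(unsigned int page_shift)
    {
        return page_shift - ADAPTER_PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%u %u\n", hw_log_page_size(12),  /* 0: 4 KiB pages */
                          hw_log_page_size(16)); /* 4: 64 KiB pages */
        return 0;
    }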
|
/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/
vmm.c
      78  u8 page_shift = 29; /* 512MiB */  in r535_mmu_vaspace_new() (local)
      79  const u64 page_size = BIT_ULL(page_shift);  in r535_mmu_vaspace_new()
      85  if (page->shift == page_shift)  in r535_mmu_vaspace_new()
      93  ret = nvkm_vmm_get_locked(vmm, true, false, false, page_shift, 32, page_size,  in r535_mmu_vaspace_new()
     116  page_shift += desc->bits;  in r535_mmu_vaspace_new()
     121  page_shift -= desc->bits;  in r535_mmu_vaspace_new()
     126  ctrl->levels[i].pageShift = page_shift;  in r535_mmu_vaspace_new()
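The walk at vmm.c:116-126 derives each page-table level's shift by accumulating the bits each level decodes. A hedged sketch of that accumulation (the three-level layout and bit widths are invented for illustration, not NVIDIA's actual format):

    #include <stdio.h>

    /* Sketch of a level walk: starting from the leaf page shift, each
     * page-table level decodes (1 << bits) entries, so the shift a given
     * level translates at is the sum of the shifts below it. */
    int main(void)
    {
        unsigned int level_bits[] = { 9, 9, 9 }; /* 512 entries per level */
        unsigned int page_shift = 12;            /* 4 KiB leaf pages */
        unsigned int i;

        for (i = 0; i < 3; i++) {
            printf("level %u translates at shift %u\n", i, page_shift);
            page_shift += level_bits[i];
        }
        return 0;
    }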
|
bar.c
      53  r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u8 page_shift, u64 pdbe)  in r535_bar_bar2_update_pde() (argument)
      63  rpc->info.entryLevelShift = page_shift;  in r535_bar_bar2_update_pde()
|
/drivers/net/ethernet/mellanox/mlx4/
mr.c
     201  mtt->page_shift = MLX4_ICM_PAGE_SHIFT;  in mlx4_mtt_init()
     204  mtt->page_shift = page_shift;  in mlx4_mtt_init()
     419  int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc_reserved() (argument)
     528  int npages, int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc() (argument)
     538  access, npages, page_shift, mr);  in mlx4_mr_alloc()
     591  int page_shift, struct mlx4_mpt_entry *mpt_entry)  in mlx4_mr_rereg_mem_write() (argument)
     595  err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_rereg_mem_write()
     601  mpt_entry->entity_size = cpu_to_be32(page_shift);  in mlx4_mr_rereg_mem_write()
     610  if (mr->mtt.page_shift == 0)  in mlx4_mr_rereg_mem_write()
     613  if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {  in mlx4_mr_rereg_mem_write()
    [all …]
|
alloc.c
     562  buf->page_shift = get_order(size) + PAGE_SHIFT;  in mlx4_buf_direct_alloc()
     571  while (t & ((1 << buf->page_shift) - 1)) {  in mlx4_buf_direct_alloc()
     572  --buf->page_shift;  in mlx4_buf_direct_alloc()
     596  buf->page_shift = PAGE_SHIFT;  in mlx4_buf_alloc()
     767  err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,  in mlx4_alloc_hwq_res()
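mlx4_buf_direct_alloc() starts from the largest shift that covers the whole buffer and shrinks it until the DMA address is aligned to (1 << page_shift), lines 562-572. A sketch of that alignment walk (addresses invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the loop at alloc.c:571-572: shrink page_shift until the
     * address is aligned to the resulting page size. */
    static unsigned int best_page_shift(uint64_t dma_addr, unsigned int max_shift)
    {
        unsigned int page_shift = max_shift;

        while (dma_addr & ((1ULL << page_shift) - 1))
            --page_shift;
        return page_shift;
    }

    int main(void)
    {
        /* A buffer at 0x230000 only supports up to 64 KiB alignment. */
        printf("%u\n", best_page_shift(0x230000, 21)); /* prints 16 */
        return 0;
    }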
|
cq.c
     205  cq_context->log_page_size = mtt->page_shift - 12;  in mlx4_cq_resize()
     339  1UL << buf->page_shift);  in mlx4_init_kernel_cqes()
     385  cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;  in mlx4_cq_alloc()
|
/drivers/mtd/nand/raw/
nand_bbt.c
     180  from = ((loff_t)page) << this->page_shift;  in read_bbt()
     561  int blocktopage = this->bbt_erase_shift - this->page_shift;  in search_bbt()
     666  (this->bbt_erase_shift - this->page_shift);  in get_bbt_block()
     694  page = block << (this->bbt_erase_shift - this->page_shift);  in get_bbt_block()
     818  to = ((loff_t)page) << this->page_shift;  in write_bbt()
     834  ops.ooblen = (len >> this->page_shift) * mtd->oobsize;  in write_bbt()
     841  pageoffs = page - (int)(to >> this->page_shift);  in write_bbt()
     842  offs = pageoffs << this->page_shift;  in write_bbt()
     869  (len >> this->page_shift) * mtd->oobsize);  in write_bbt()
    1087  len += (len >> this->page_shift) * mtd->oobsize;  in nand_update_bbt()
    [all …]
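The bad-block-table code converts between erase blocks, pages, and byte offsets purely with shift differences, and sizes OOB buffers by page count. A sketch with invented NAND geometry:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the shift arithmetic in nand_bbt.c: with page_shift =
     * log2(page size) and bbt_erase_shift = log2(erase-block size),
     * block-to-page and byte-offset conversions are pure shifts, and the OOB
     * byte count scales with the number of pages (len >> page_shift). */
    int main(void)
    {
        unsigned int page_shift = 11;      /* 2 KiB pages */
        unsigned int bbt_erase_shift = 17; /* 128 KiB erase blocks */
        unsigned int oobsize = 64;         /* OOB bytes per page */
        unsigned int block = 5;

        /* First page of block 5, and its byte offset on the device. */
        unsigned int page = block << (bbt_erase_shift - page_shift);
        uint64_t from = (uint64_t)page << page_shift;

        /* OOB bytes needed to cover one erase block of data. */
        unsigned int ooblen = ((1u << bbt_erase_shift) >> page_shift) * oobsize;

        printf("page %u, offset %#llx, ooblen %u\n",
               page, (unsigned long long)from, ooblen); /* 320, 0xa0000, 4096 */
        return 0;
    }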
|
/drivers/infiniband/sw/rdmavt/
mr.c
     373  mr->mr.page_shift = PAGE_SHIFT;  in rvt_reg_user_mr()
     565  u32 ps = 1 << mr->mr.page_shift;  in rvt_set_page()
     566  u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;  in rvt_set_page()
     600  mr->mr.page_shift = PAGE_SHIFT;  in rvt_map_mr_sg()
     777  if (mr->page_shift) {  in rvt_lkey_ok()
     785  entries_spanned_by_off = off >> mr->page_shift;  in rvt_lkey_ok()
     786  off -= (entries_spanned_by_off << mr->page_shift);  in rvt_lkey_ok()
     884  if (mr->page_shift) {  in rvt_rkey_ok()
     892  entries_spanned_by_off = off >> mr->page_shift;  in rvt_rkey_ok()
     893  off -= (entries_spanned_by_off << mr->page_shift);  in rvt_rkey_ok()
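rvt_lkey_ok()/rvt_rkey_ok() split a byte offset into whole map entries plus a remainder using the MR's page_shift, avoiding a division. A sketch (offset value invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the lookup at mr.c:785-786: when every segment of an MR is
     * the same (1 << page_shift) bytes, one shift and one subtract give the
     * entry index and the remaining in-entry offset. */
    int main(void)
    {
        unsigned int page_shift = 12;
        uint64_t off = 0x5234; /* byte offset into the MR */

        uint64_t entries = off >> page_shift;         /* entries skipped: 5 */
        uint64_t rem = off - (entries << page_shift); /* in-entry offset: 0x234 */

        printf("entry %llu, offset %#llx\n",
               (unsigned long long)entries, (unsigned long long)rem);
        return 0;
    }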
|
/drivers/infiniband/sw/rxe/
rxe_odp.c
     134  addr = iova & (~(BIT(umem_odp->page_shift) - 1));  in rxe_check_pagefault()
     138  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;  in rxe_check_pagefault()
     145  addr += BIT(umem_odp->page_shift);  in rxe_check_pagefault()
     152  return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;  in rxe_odp_iova_to_index()
     157  return iova & (BIT(umem_odp->page_shift) - 1);  in rxe_odp_iova_to_page_offset()
     210  bytes = BIT(umem_odp->page_shift) - offset;  in __rxe_odp_mr_copy()
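rxe_odp_iova_to_index() and rxe_odp_iova_to_page_offset() split an iova with one shift and one mask. A sketch (umem_start stands in for ib_umem_start(); values invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the split at rxe_odp.c:152/157: with a power-of-two page,
     * the same mask separates an iova into a page index (relative to the
     * umem start) and an in-page offset. */
    int main(void)
    {
        unsigned int page_shift = 12;
        uint64_t page_mask = (1ULL << page_shift) - 1;
        uint64_t umem_start = 0x10000000;
        uint64_t iova = 0x10003abc;

        uint64_t index = (iova - umem_start) >> page_shift; /* page 3 */
        uint64_t offset = iova & page_mask;                 /* 0xabc */

        printf("index %llu, offset %#llx\n",
               (unsigned long long)index, (unsigned long long)offset);
        return 0;
    }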
|
rxe_mr.c
      63  mr->page_shift = PAGE_SHIFT;  in rxe_mr_init()
      77  return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);  in rxe_mr_iova_to_index()
     238  mr->page_shift = ilog2(page_size);  in rxe_map_mr_sg()
|
/drivers/net/ethernet/mellanox/mlx5/core/
alloc.c
      81  buf->page_shift = PAGE_SHIFT;  in mlx5_frag_buf_alloc_node()
      95  if (frag->map & ((1 << buf->page_shift) - 1)) {  in mlx5_frag_buf_alloc_node()
      99  &frag->map, buf->page_shift);  in mlx5_frag_buf_alloc_node()
|
/drivers/vfio/
vfio_iommu_spapr_tce.c
     195  return page_shift(compound_head(page)) >= it_page_shift;  in tce_page_is_contained()
     609  __u32 page_shift,  in tce_iommu_create_table() (argument)
     616  table_size = table_group->ops->get_table_size(page_shift, window_size,  in tce_iommu_create_table()
     626  page_shift, window_size, levels, ptbl);  in tce_iommu_create_table()
     644  __u32 page_shift, __u64 window_size, __u32 levels,  in tce_iommu_create_window() (argument)
     663  if (!(table_group->pgsizes & (1ULL << page_shift)))  in tce_iommu_create_window()
     673  page_shift, window_size, levels, &tbl);  in tce_iommu_create_window()
    1136  create.page_shift,  in tce_iommu_ioctl()
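Line 663 validates a requested page_shift against the group's pgsizes bitmap, where bit n set means (1 << n)-byte IOMMU pages are supported. A sketch of the bit test (bitmap value invented):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the check at vfio_iommu_spapr_tce.c:663: validating a
     * requested page_shift is a single test against the supported-sizes
     * bitmap. */
    static bool page_shift_supported(uint64_t pgsizes, uint32_t page_shift)
    {
        return (pgsizes & (1ULL << page_shift)) != 0;
    }

    int main(void)
    {
        uint64_t pgsizes = (1ULL << 12) | (1ULL << 16) | (1ULL << 24);

        printf("%d %d\n", page_shift_supported(pgsizes, 16),  /* 1 */
                          page_shift_supported(pgsizes, 21)); /* 0 */
        return 0;
    }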
|
/drivers/nvme/target/
passthru.c
      87  int page_shift;  in nvmet_passthru_override_id_ctrl() (local)
     115  page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;  in nvmet_passthru_override_id_ctrl()
     117  id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;  in nvmet_passthru_override_id_ctrl()
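Lines 115-117 derive the synthetic MDTS value: CAP.MPSMIN gives the device's minimum page shift relative to 4 KiB, and max_hw_sectors is converted from 512-byte sectors (hence the "+ 9"). A sketch of the arithmetic (values invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable stand-in for the kernel's ilog2(). */
    static unsigned int ilog2_u32(uint32_t v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        /* MDTS is the max transfer size as a power of two in units of the
         * controller's minimum memory page size. */
        unsigned int mpsmin = 0;        /* CAP.MPSMIN: 4 KiB device pages */
        unsigned int page_shift = mpsmin + 12;
        uint32_t max_hw_sectors = 2048; /* 1 MiB transfers */
        unsigned int mdts = ilog2_u32(max_hw_sectors) + 9 - page_shift;

        printf("mdts %u\n", mdts); /* 8: 2^8 * 4 KiB = 1 MiB */
        return 0;
    }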
|