/linux/drivers/infiniband/hw/hns/

hns_roce_alloc.c
      67  u32 page_shift, u32 flags)  in hns_roce_buf_alloc() argument
      76  if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))  in hns_roce_buf_alloc()
      84  buf->page_shift = page_shift;  in hns_roce_buf_alloc()
      85  page_size = 1 << buf->page_shift;  in hns_roce_buf_alloc()
     134  unsigned int page_shift)  in hns_roce_get_kmem_bufs() argument
     140  if (page_shift > buf->trunk_shift) {  in hns_roce_get_kmem_bufs()
     142  page_shift, buf->trunk_shift);  in hns_roce_get_kmem_bufs()
     150  offset += (1 << page_shift);  in hns_roce_get_kmem_bufs()
     157  unsigned int page_shift)  in hns_roce_get_umem_bufs() argument
     163  rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {  in hns_roce_get_umem_bufs()

hns_roce_mr.c
      99  buf_attr.page_shift = is_fast ? PAGE_SHIFT :  in alloc_mr_pbl()
     661  unsigned int page_shift)  in mtr_check_direct_pages() argument
     663  size_t page_size = 1 << page_shift;  in mtr_check_direct_pages()
     709  buf_attr->page_shift,  in mtr_alloc_bufs()
     750  unsigned int page_shift;  in mtr_map_bufs() local
     764  mtr->umem, page_shift);  in mtr_map_bufs()
     767  mtr->kmem, page_shift);  in mtr_map_bufs()
     949  buf_attr->page_shift = order_base_2(page_sz);  in get_best_page_shift()
    1006  attr->region_count, attr->page_shift);  in is_buf_attr_valid()
    1038  buf_pg_sz = 1 << attr->page_shift;  in mtr_init_buf_cfg()
    [all …]
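
The hns_roce hits above all revolve around one idiom: page_shift is log2 of the buffer page size, so sizes and page counts are recovered with shifts, and shifts below the hardware minimum are rejected. A minimal standalone sketch of that idiom, where HW_MIN_PAGE_SHIFT and all values are chosen purely for illustration:

    /* Sketch of the page_shift idiom: the shift is log2(page size), so byte
     * size and page count are recovered with shifts only. HW_MIN_PAGE_SHIFT
     * and the sizes below are illustrative, not the driver's values. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define HW_MIN_PAGE_SHIFT 12                 /* assume a 4 KiB hardware minimum */

    int main(void)
    {
        unsigned int page_shift = 16;            /* 64 KiB pages, for example */
        size_t page_size, npages, buf_size = 1 << 20;

        assert(page_shift >= HW_MIN_PAGE_SHIFT);
        page_size = (size_t)1 << page_shift;                  /* 65536 */
        npages = (buf_size + page_size - 1) >> page_shift;    /* round up: 16 */
        printf("page_size=%zu npages=%zu\n", page_size, npages);
        return 0;
    }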

/linux/drivers/infiniband/core/

umem_odp.c
      59  size_t page_size = 1UL << umem_odp->page_shift;  in ib_init_umem_odp()
      73  ndmas = (end - start) >> umem_odp->page_shift;  in ib_init_umem_odp()
     134  umem_odp->page_shift = PAGE_SHIFT;  in ib_umem_odp_alloc_implicit()
     182  odp_data->page_shift = PAGE_SHIFT;  in ib_umem_odp_alloc_child()
     244  umem_odp->page_shift = PAGE_SHIFT;  in ib_umem_odp_get()
     247  umem_odp->page_shift = HPAGE_SHIFT;  in ib_umem_odp_get()
     353  unsigned int page_shift, hmm_order, pfn_start_idx;  in ib_umem_odp_map_dma_and_lock() local
     365  page_shift = umem_odp->page_shift;  in ib_umem_odp_map_dma_and_lock()
     439  if (hmm_order + PAGE_SHIFT < page_shift) {  in ib_umem_odp_map_dma_and_lock()
     443  __func__, hmm_order, page_shift);  in ib_umem_odp_map_dma_and_lock()
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/

params.c
      44  u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);  in mlx5e_mpwrq_umr_mode() local
      49  oversized = xsk->chunk_size < (1 << page_shift);  in mlx5e_mpwrq_umr_mode()
      50  WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));  in mlx5e_mpwrq_umr_mode()
     126  pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;  in mlx5e_mpwrq_pages_per_wqe()
     322  u8 page_shift,  in mlx5e_verify_rx_mpwqe_strides() argument
     353  page_shift, umr_mode);  in mlx5e_verify_params_rx_mpwqe_strides()
     378  log_num_strides, page_shift,  in mlx5e_rx_mpwqe_is_linear_skb()
     387  u8 log_pkts_per_wqe, page_shift, max_log_rq_size;  in mlx5e_mpwqe_get_log_rq_size() local
     390  page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);  in mlx5e_mpwqe_get_log_rq_size()
     553  page_shift, umr_mode);  in mlx5e_mpwrq_validate_xsk()
    [all …]

params.h
      63  u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
      65  u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
      67  u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
      69  u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
      71  u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
      75  u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
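
The pages_per_wqe line in params.c shows how a power-of-two page count is derived from the difference of two log2 sizes. A hedged sketch of the same arithmetic, with pages_per_obj() and its inputs invented for illustration rather than taken from mlx5:

    /* Illustrative sketch (not mlx5 code): derive an object's page count from
     * two log2 sizes; anything smaller than a page still occupies one page. */
    #include <stdio.h>

    static unsigned int pages_per_obj(unsigned int log_obj_sz, unsigned int page_shift)
    {
        return log_obj_sz > page_shift ? 1u << (log_obj_sz - page_shift) : 1;
    }

    int main(void)
    {
        printf("%u\n", pages_per_obj(18, 12));   /* 256 KiB / 4 KiB = 64 */
        printf("%u\n", pages_per_obj(11, 12));   /* smaller than a page -> 1 */
        return 0;
    }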

/linux/drivers/pci/endpoint/

pci-epc-mem.c
      26  unsigned int page_shift = ilog2(mem->window.page_size);  in pci_epc_mem_get_order() local
      29  size >>= page_shift;  in pci_epc_mem_get_order()
      53  unsigned int page_shift;  in pci_epc_multi_mem_init() local
      73  page_shift = ilog2(page_size);  in pci_epc_multi_mem_init()
      74  pages = windows[i].size >> page_shift;  in pci_epc_multi_mem_init()
     183  unsigned int page_shift;  in pci_epc_mem_alloc_addr() local
     198  page_shift = ilog2(mem->window.page_size);  in pci_epc_mem_alloc_addr()
     200  ((phys_addr_t)pageno << page_shift);  in pci_epc_mem_alloc_addr()
     248  unsigned int page_shift;  in pci_epc_mem_free_addr() local
     260  page_shift = ilog2(page_size);  in pci_epc_mem_free_addr()
    [all …]
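
pci-epc-mem.c derives page_shift from the window's page size and turns a bitmap page number back into a physical address by shifting. A small userspace sketch of that arithmetic, where log2_u32() stands in for the kernel's ilog2() and the window base, page size, and page number are made up:

    /* Sketch of the window arithmetic above with a local log2 helper. */
    #include <stdint.h>
    #include <stdio.h>

    static unsigned int log2_u32(uint32_t v)     /* v must be a power of two */
    {
        unsigned int s = 0;

        while (v >>= 1)
            s++;
        return s;
    }

    int main(void)
    {
        uint64_t window_base = 0x90000000ULL;
        uint32_t page_size = 4096;
        unsigned int page_shift = log2_u32(page_size);        /* 12 */
        unsigned int pageno = 5;                              /* from a bitmap allocator */
        uint64_t phys = window_base + ((uint64_t)pageno << page_shift);

        printf("page_shift=%u phys=0x%llx\n", page_shift,
               (unsigned long long)phys);                     /* 0x90005000 */
        return 0;
    }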

/linux/tools/testing/selftests/powerpc/mm/

bad_accesses.c
      69  unsigned long i, j, addr, region_shift, page_shift, page_size;  in test() local
      84  page_shift = 16;  in test()
      86  page_shift = 12;  in test()
     103  (1 << page_shift) >> 10,  in test()
     121  for (j = page_shift - 1; j < 60; j++) {  in test()
     130  addr = (base | delta) & ~((1 << page_shift) - 1);  in test()
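
bad_accesses.c picks page_shift 16 or 12 depending on whether the kernel runs with 64 KiB or 4 KiB pages, then rounds addresses down to a page boundary with a mask. A minimal sketch of that mask, using an arbitrary address:

    /* Sketch of the page-alignment mask: clear the low page_shift bits. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 16;                        /* 64 KiB pages */
        unsigned long addr = 0x12345678UL;
        unsigned long aligned = addr & ~((1UL << page_shift) - 1);

        printf("%#lx -> %#lx\n", addr, aligned);              /* 0x12340000 */
        return 0;
    }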

/linux/arch/powerpc/include/asm/

ultravisor.h
      50  u64 page_shift)  in uv_page_in() argument
      53  page_shift);  in uv_page_in()
      57  u64 page_shift)  in uv_page_out() argument
      60  page_shift);  in uv_page_out()
      75  static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)  in uv_page_inval() argument
      77  return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);  in uv_page_inval()

kvm_book3s_uvmem.h
      15  unsigned long page_shift);
      19  unsigned long page_shift);
      54  unsigned long flags, unsigned long page_shift)  in kvmppc_h_svm_page_in() argument
      61  unsigned long flags, unsigned long page_shift)  in kvmppc_h_svm_page_out() argument

iommu.h
     171  __u32 page_shift,
     176  __u32 page_shift,
     299  extern int iommu_tce_check_ioba(unsigned long page_shift,
     302  extern int iommu_tce_check_gpa(unsigned long page_shift,

/linux/tools/testing/selftests/kvm/lib/aarch64/

processor.c
      30  unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;  in pgd_index()
      38  unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;  in pud_index()
      49  unsigned int shift = (vm->page_shift - 3) + vm->page_shift;  in pmd_index()
      61  return (gva >> vm->page_shift) & mask;  in pte_index()
      75  pte = pa & GENMASK(49, vm->page_shift);  in addr_pte()
      79  pte = pa & GENMASK(47, vm->page_shift);  in addr_pte()
      80  if (vm->page_shift == 16)  in addr_pte()
      97  if (vm->page_shift == 16)  in pte_addr()
     106  unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;  in ptrs_per_pgd()
     112  return 1 << (vm->page_shift - 3);  in ptrs_per_pte()
    [all …]
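
The aarch64 selftest helpers above compute page-table indices from page_shift: with 8-byte descriptors, each level resolves (page_shift - 3) bits of virtual address. A sketch of that arithmetic under assumed 4 KiB pages and four levels; the variables and the virtual address are illustrative, not the selftest's own structures:

    /* Sketch: bits resolved per level and the top-level index of a VA. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int page_shift = 12;                          /* 4 KiB pages */
        unsigned int levels = 4;
        unsigned int bits_per_level = page_shift - 3;          /* 9 */
        unsigned int pgd_shift = (levels - 1) * bits_per_level + page_shift; /* 39 */
        uint64_t mask = (1ULL << bits_per_level) - 1;
        uint64_t va = 0x0000007fdeadb000ULL;

        printf("ptrs per level = %llu\n", 1ULL << bits_per_level);   /* 512 */
        printf("pgd index      = %llu\n",
               (unsigned long long)((va >> pgd_shift) & mask));
        return 0;
    }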

/linux/arch/powerpc/kvm/

book3s_64_vio.c
     150  if ((tbltmp->it_page_shift <= stt->page_shift) &&  in kvm_spapr_tce_attach_iommu_group()
     152  stt->offset << stt->page_shift) &&  in kvm_spapr_tce_attach_iommu_group()
     154  stt->size << stt->page_shift)) {  in kvm_spapr_tce_attach_iommu_group()
     309  if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||  in kvm_vm_ioctl_create_spapr_tce()
     324  stt->page_shift = args->page_shift;  in kvm_vm_ioctl_create_spapr_tce()
     391  if (iommu_tce_check_gpa(stt->page_shift, gpa))  in kvmppc_tce_validate()
     614  entry = ioba >> stt->page_shift;  in kvmppc_h_put_tce()
     655  entry = ioba >> stt->page_shift;  in kvmppc_h_put_tce_indirect()
     754  unsigned long entry = ioba >> stt->page_shift;  in kvmppc_h_stuff_tce()
     772  kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);  in kvmppc_h_stuff_tce()
    [all …]

book3s_hv_uvmem.c
     569  gpa, 0, page_shift);  in __kvmppc_svm_page_out()
     588  unsigned long page_shift,  in kvmppc_svm_page_out() argument
     743  unsigned long page_shift,  in kvmppc_svm_page_in() argument
     781  gpa, 0, page_shift);  in kvmppc_svm_page_in()
     878  unsigned long page_shift)  in kvmppc_share_page() argument
     919  page_shift)) {  in kvmppc_share_page()
     938  unsigned long page_shift)  in kvmppc_h_svm_page_in() argument
     949  if (page_shift != PAGE_SHIFT)  in kvmppc_h_svm_page_in()
     971  end = start + (1UL << page_shift);  in kvmppc_h_svm_page_in()
    1058  if (page_shift != PAGE_SHIFT)  in kvmppc_h_svm_page_out()
    [all …]
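
In book3s_64_vio.c the TCE entry index is the IO bus address shifted right by the table's page_shift, and table creation only accepts shifts between 12 and 34. A sketch of that mapping with made-up values:

    /* Sketch: entry index = ioba / (1 << page_shift), with the 12..34 bound
     * mirroring the check applied when the table is created. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int page_shift = 16;            /* 64 KiB IOMMU pages */
        uint64_t ioba = 0x00250000ULL;           /* IO bus address */
        uint64_t entry;

        if (page_shift < 12 || page_shift > 34)
            return 1;                            /* reject, as the ioctl does */

        entry = ioba >> page_shift;              /* 0x250000 >> 16 = entry 37 */
        printf("entry %llu\n", (unsigned long long)entry);
        return 0;
    }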

/linux/tools/testing/selftests/mm/

hmm-tests.c
      72  unsigned int page_shift;  in FIXTURE() local
      95  unsigned int page_shift;  in FIXTURE() local
     304  size = npages << self->page_shift;  in TEST_F()
     367  size = npages << self->page_shift;  in TEST_F()
     426  size = npages << self->page_shift;  in TEST_F()
     474  size = npages << self->page_shift;  in TEST_F()
     542  size = npages << self->page_shift;  in TEST_F()
     620  size = npages << self->page_shift;  in TEST_F()
     712  npages = size >> self->page_shift;  in TEST_F()
     804  npages = size >> self->page_shift;  in TEST_F()
    [all …]

/linux/include/linux/

kmsan.h
     145  unsigned int page_shift);
     171  unsigned int page_shift);
     351  struct page **pages, unsigned int page_shift)  in kmsan_vmap_pages_range_noflush() argument
     365  unsigned int page_shift)  in kmsan_ioremap_page_range() argument

/linux/arch/powerpc/platforms/pseries/

iommu.c
     569  tbl->it_offset = win_addr >> page_shift;  in iommu_table_setparms_common()
     570  tbl->it_size = window_size >> page_shift;  in iommu_table_setparms_common()
     571  tbl->it_page_shift = page_shift;  in iommu_table_setparms_common()
    1362  u32 page_shift, u32 window_shift)  in ddw_property_create() argument
    1407  int page_shift;  in enable_ddw() local
    1506  if (!page_shift) {  in enable_ddw()
    1533  1ULL << page_shift);  in enable_ddw()
    1563  << page_shift);  in enable_ddw()
    1868  size = window_size >> (page_shift - 3);  in spapr_tce_get_table_size()
    1977  (page_shift == IOMMU_PAGE_SHIFT_4K))  in is_default_window_request()
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx4/

mr.c
     201  mtt->page_shift = MLX4_ICM_PAGE_SHIFT;  in mlx4_mtt_init()
     204  mtt->page_shift = page_shift;  in mlx4_mtt_init()
     419  int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc_reserved() argument
     528  int npages, int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc() argument
     538  access, npages, page_shift, mr);  in mlx4_mr_alloc()
     591  int page_shift, struct mlx4_mpt_entry *mpt_entry)  in mlx4_mr_rereg_mem_write() argument
     595  err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_rereg_mem_write()
     601  mpt_entry->entity_size = cpu_to_be32(page_shift);  in mlx4_mr_rereg_mem_write()
     610  if (mr->mtt.page_shift == 0)  in mlx4_mr_rereg_mem_write()
     613  if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {  in mlx4_mr_rereg_mem_write()
    [all …]

/linux/tools/testing/selftests/kvm/lib/x86_64/

vmx.c
     380  pte->address = paddr >> vm->page_shift;  in nested_create_pte()
     382  pte->address = vm_alloc_page_table(vm) >> vm->page_shift;  in nested_create_pte()
     416  TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,  in __nested_pg_map()
     424  TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,  in __nested_pg_map()
     506  i = (region->region.guest_phys_addr >> vm->page_shift) - 1;  in nested_map_memslot()
     507  last = i + (region->region.memory_size >> vm->page_shift);  in nested_map_memslot()
     514  (uint64_t)i << vm->page_shift,  in nested_map_memslot()
     515  (uint64_t)i << vm->page_shift,  in nested_map_memslot()
     516  1 << vm->page_shift);  in nested_map_memslot()
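
The vmx.c hits store a page frame number in the page-table entry, i.e. the physical address shifted right by page_shift, and shift it back to recover the page-aligned address. A sketch of that conversion; the struct here is a stand-in, not the selftest's entry layout:

    /* Sketch of the PFN <-> physical address conversion used above. */
    #include <stdint.h>
    #include <stdio.h>

    struct fake_pte { uint64_t address; };       /* holds a page frame number */

    int main(void)
    {
        unsigned int page_shift = 12;
        uint64_t paddr = 0x00000000c0151000ULL;
        struct fake_pte pte = { .address = paddr >> page_shift };
        uint64_t back = pte.address << page_shift;   /* low bits are dropped */

        printf("pfn=0x%llx back=0x%llx\n",
               (unsigned long long)pte.address, (unsigned long long)back);
        return 0;
    }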

/linux/tools/testing/selftests/bpf/progs/

bpf_iter_vma_offset.c
      13  __u32 page_shift = 0;  variable
      33  offset = address - vma->vm_start + (vma->vm_pgoff << page_shift);  in get_vma_offset()
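
get_vma_offset() computes the file offset backing a virtual address: its offset inside the VMA plus the VMA's file offset, where vm_pgoff is counted in pages and therefore shifted left by page_shift. A sketch with invented VMA values, assuming 4 KiB pages:

    /* Sketch of the VA -> file offset computation above. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t page_shift = 12;
        uint64_t vm_start = 0x7f0000400000ULL;   /* VMA start VA */
        uint64_t vm_pgoff = 3;                   /* file offset, in pages */
        uint64_t address = 0x7f0000401234ULL;    /* VA to translate */
        uint64_t offset = address - vm_start + (vm_pgoff << page_shift);

        printf("file offset = 0x%llx\n", (unsigned long long)offset);  /* 0x4234 */
        return 0;
    }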

/linux/arch/powerpc/platforms/powernv/

pci-ioda-tce.c
      50  u64 dma_offset, unsigned int page_shift)  in pnv_pci_setup_iommu_table() argument
      54  tbl->it_page_shift = page_shift;  in pnv_pci_setup_iommu_table()
     291  __u32 page_shift, __u64 window_size, __u32 levels,  in pnv_pci_ioda2_table_alloc_pages() argument
     298  unsigned int entries_shift = window_shift - page_shift;  in pnv_pci_ioda2_table_alloc_pages()
     314  if ((level_shift - 3) * levels + page_shift >= 55)  in pnv_pci_ioda2_table_alloc_pages()
     348  page_shift);  in pnv_pci_ioda2_table_alloc_pages()

pci.h
     285  extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
     314  __u32 page_shift, __u64 window_size, __u32 levels,
     325  u64 dma_offset, unsigned int page_shift);

/linux/drivers/mtd/nand/raw/

nand_bbt.c
     180  from = ((loff_t)page) << this->page_shift;  in read_bbt()
     561  int blocktopage = this->bbt_erase_shift - this->page_shift;  in search_bbt()
     666  (this->bbt_erase_shift - this->page_shift);  in get_bbt_block()
     694  page = block << (this->bbt_erase_shift - this->page_shift);  in get_bbt_block()
     818  to = ((loff_t)page) << this->page_shift;  in write_bbt()
     834  ops.ooblen = (len >> this->page_shift) * mtd->oobsize;  in write_bbt()
     841  pageoffs = page - (int)(to >> this->page_shift);  in write_bbt()
     842  offs = pageoffs << this->page_shift;  in write_bbt()
     869  (len >> this->page_shift)* mtd->oobsize);  in write_bbt()
    1087  len += (len >> this->page_shift) * mtd->oobsize;  in nand_update_bbt()
    [all …]
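
nand_bbt.c converts between erase blocks and pages by shifting with the difference bbt_erase_shift - page_shift, since both sizes are powers of two. A sketch of that conversion; the 128 KiB block / 2 KiB page geometry is only an example:

    /* Sketch: first page of an erase block, and its byte offset on the chip. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int bbt_erase_shift = 17;       /* 128 KiB erase block */
        unsigned int page_shift = 11;            /* 2 KiB page */
        unsigned int block_to_page = bbt_erase_shift - page_shift;   /* 6 */
        unsigned int block = 5;
        unsigned int first_page = block << block_to_page;            /* 320 */
        long long byte_off = (long long)first_page << page_shift;    /* 655360 */

        printf("first_page=%u byte_off=%lld\n", first_page, byte_off);
        return 0;
    }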

/linux/tools/testing/selftests/kvm/lib/

kvm_util.c
     262  0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);  in vm_vaddr_populate_bitmap()
     265  (1ULL << (vm->va_bits - 1)) >> vm->page_shift);  in vm_vaddr_populate_bitmap()
     286  vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;  in ____vm_create()
    1050  guest_paddr >> vm->page_shift, npages);  in vm_mem_add()
    2028  base = pg = paddr_min >> vm->page_shift;  in __vm_phy_pages_alloc()
    2091  unsigned int page_shift,  in vm_calc_num_pages() argument
    2095  unsigned int n = 1 << (new_page_shift - page_shift);  in vm_calc_num_pages()
    2097  if (page_shift >= new_page_shift)  in vm_calc_num_pages()
    2112  vm_guest_mode_params[mode].page_shift,  in vm_num_host_pages()
    2120  vm_guest_mode_params[mode].page_shift, false);  in vm_num_guest_pages()
    [all …]
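
vm_calc_num_pages() in kvm_util.c converts a page count between two page sizes expressed as shifts. A hedged sketch of that kind of conversion (not the selftest's exact helper), rounding up when the sizes do not divide evenly:

    /* Sketch: convert num_pages of size 2^page_shift into pages of size
     * 2^new_page_shift, optionally rounding up. */
    #include <stdio.h>

    static unsigned long calc_num_pages(unsigned long num_pages,
                                        unsigned int page_shift,
                                        unsigned int new_page_shift,
                                        int round_up)
    {
        if (page_shift >= new_page_shift)
            /* new pages are smaller or equal: the count grows */
            return num_pages << (page_shift - new_page_shift);

        /* new pages are larger: the count shrinks, optionally rounded up */
        {
            unsigned int n = 1u << (new_page_shift - page_shift);

            return num_pages / n + ((round_up && num_pages % n) ? 1 : 0);
        }
    }

    int main(void)
    {
        printf("%lu\n", calc_num_pages(3, 16, 12, 0));   /* 3 x 64K = 48 x 4K */
        printf("%lu\n", calc_num_pages(5, 12, 16, 1));   /* 5 x 4K -> 1 x 64K */
        return 0;
    }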

/linux/drivers/infiniband/sw/rdmavt/

mr.c
     368  mr->mr.page_shift = PAGE_SHIFT;  in rvt_reg_user_mr()
     560  u32 ps = 1 << mr->mr.page_shift;  in rvt_set_page()
     561  u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;  in rvt_set_page()
     595  mr->mr.page_shift = PAGE_SHIFT;  in rvt_map_mr_sg()
     772  if (mr->page_shift) {  in rvt_lkey_ok()
     780  entries_spanned_by_off = off >> mr->page_shift;  in rvt_lkey_ok()
     781  off -= (entries_spanned_by_off << mr->page_shift);  in rvt_lkey_ok()
     879  if (mr->page_shift) {  in rvt_rkey_ok()
     887  entries_spanned_by_off = off >> mr->page_shift;  in rvt_rkey_ok()
     888  off -= (entries_spanned_by_off << mr->page_shift);  in rvt_rkey_ok()
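
The rvt_lkey_ok()/rvt_rkey_ok() hits split a byte offset into the 2^page_shift-sized entry it falls in and the remainder inside that entry. A minimal sketch with an arbitrary offset:

    /* Sketch: which fixed-size entry an offset lands in, and the leftover. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int page_shift = 12;            /* 4 KiB entries */
        uint64_t off = 0x6ABCULL;
        uint64_t entries_spanned = off >> page_shift;             /* 6 */
        uint64_t within = off - (entries_spanned << page_shift);  /* 0xABC */

        printf("entry=%llu offset-in-entry=0x%llx\n",
               (unsigned long long)entries_spanned,
               (unsigned long long)within);
        return 0;
    }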

/linux/include/rdma/

ib_umem_odp.h
      44  unsigned int page_shift;  member
      67  umem_odp->page_shift;  in ib_umem_odp_num_pages()
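
ib_umem_odp_num_pages() divides the registered range by the ODP page size. A sketch of the usual pattern behind that kind of count, aligning the range outward before shifting; the address, length, and page_shift are made up:

    /* Sketch: page count of a byte range after aligning it to page bounds. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int page_shift = 12;
        uint64_t page_size = 1ULL << page_shift;
        uint64_t addr = 0x100123ULL, length = 0x2000ULL;
        uint64_t start = addr & ~(page_size - 1);                          /* round down */
        uint64_t end = (addr + length + page_size - 1) & ~(page_size - 1); /* round up */
        uint64_t npages = (end - start) >> page_shift;

        printf("npages=%llu\n", (unsigned long long)npages);               /* 3 */
        return 0;
    }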