/drivers/base/
memory.c
    237  start_pfn, nr_pages);  in memory_block_online()
    249  arg.altmap_start_pfn = start_pfn;  in memory_block_online()
    251  arg.start_pfn = start_pfn + nr_vmemmap_pages;  in memory_block_online()
    266  ret = online_pages(start_pfn + nr_vmemmap_pages,  in memory_block_online()
    318  ret = offline_pages(start_pfn + nr_vmemmap_pages,  in memory_block_offline()
    323  adjust_present_page_count(pfn_to_page(start_pfn),  in memory_block_offline()
    332  arg.altmap_start_pfn = start_pfn;  in memory_block_offline()
    334  arg.start_pfn = start_pfn + nr_vmemmap_pages;  in memory_block_offline()
    469  arch_get_memory_phys_device(start_pfn));  in phys_device_show()
    512  start_pfn, nr_pages);  in valid_zones_show()
    [all …]
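
The memory.c hits trace the memmap_on_memory path: when a hot-plugged block carries its own memmap in its first nr_vmemmap_pages pages, onlining skips past them. A minimal sketch of that arithmetic, assuming the four-argument online_pages() of recent kernels; the function below is illustrative, not the actual memory_block_online():

```c
#include <linux/memory_hotplug.h>

/*
 * Illustrative sketch: with memmap_on_memory, the first
 * nr_vmemmap_pages of the block back its own struct pages, so only
 * the remainder is handed to online_pages() (cf. lines 251/266).
 */
static int sketch_block_online(unsigned long start_pfn,
			       unsigned long nr_pages,
			       unsigned long nr_vmemmap_pages,
			       struct zone *zone)
{
	return online_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages, zone, NULL);
}
```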
|
arch_numa.c
    195  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)  in setup_node_data() argument
    197  if (start_pfn >= end_pfn)  in setup_node_data()
    203  NODE_DATA(nid)->node_start_pfn = start_pfn;  in setup_node_data()
    204  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_node_data()
    217  unsigned long start_pfn, end_pfn;  in numa_register_nodes() local
    219  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in numa_register_nodes()
    220  setup_node_data(nid, start_pfn, end_pfn);  in numa_register_nodes()
|
node.c
    865  void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,  in register_memory_blocks_under_node_hotplug() argument
    868  walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),  in register_memory_blocks_under_node_hotplug()
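
The node.c hit converts a PFN range into the byte address and byte size that walk_memory_blocks() expects. A standalone restatement of that conversion; the macro body matches include/linux/pfn.h, and the example PFNs are invented:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12                             /* 4 KiB pages assumed */
#define PFN_PHYS(x)  ((uint64_t)(x) << PAGE_SHIFT)  /* as in linux/pfn.h */

int main(void)
{
	unsigned long start_pfn = 0x100000, end_pfn = 0x140000; /* example only */

	/* walk_memory_blocks(start, size, ...) takes bytes, not PFNs */
	printf("start=%#llx size=%#llx\n",
	       (unsigned long long)PFN_PHYS(start_pfn),
	       (unsigned long long)PFN_PHYS(end_pfn - start_pfn));
	return 0;
}
```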
|
/drivers/hv/
hv_balloon.c
    425  unsigned long start_pfn;  member
    438  unsigned long start_pfn;  member
    614  while ((pfn >= has->start_pfn) &&  in hv_page_offline_check()
    712  unsigned long start_pfn;  in hv_mem_hot_add() local
    772  if (pfn < has->start_pfn ||  in hv_online_page()
    796  if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)  in pfn_covered()
    812  gap->end_pfn = start_pfn;  in pfn_covered()
    815  has->covered_end_pfn = start_pfn;  in pfn_covered()
    855  if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)  in handle_pg_range()
    880  if (start_pfn > has->start_pfn &&  in handle_pg_range()
    [all …]
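
The recurring comparison in pfn_covered() and handle_pg_range() treats each hot-add region as the half-open PFN interval [start_pfn, end_pfn). A standalone restatement of that membership test, with the driver's state trimmed to the two fields the hits use:

```c
#include <stdbool.h>

struct pfn_region {            /* reduced stand-in for hv_balloon's state */
	unsigned long start_pfn;   /* first PFN in the region */
	unsigned long end_pfn;     /* one past the last PFN */
};

/* mirrors the checks at lines 796 and 855 above */
static bool pfn_in_region(const struct pfn_region *has, unsigned long pfn)
{
	return pfn >= has->start_pfn && pfn < has->end_pfn;
}
```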
|
/drivers/gpu/drm/imagination/
pvr_vm_mips.c
    155  s32 start_pfn;  in pvr_vm_mips_map() local
    171  start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;  in pvr_vm_mips_map()
    181  for (pfn = start_pfn; pfn <= end_pfn; pfn++) {  in pvr_vm_mips_map()
    186  (pfn - start_pfn) <<  in pvr_vm_mips_map()
    204  while (--pfn >= start_pfn)  in pvr_vm_mips_map()
    227  const u32 start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >>  in pvr_vm_mips_unmap() local
    232  for (u32 pfn = start_pfn; pfn < end_pfn; pfn++)  in pvr_vm_mips_unmap()
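
Line 171 reduces a device address to a firmware-heap page number: mask off the heap offset, then shift by log2 of the 4 KiB MIPS firmware page size. A standalone sketch; the mask value is left to the caller, and the shift is assumed to be 12 (log2 of 4 KiB), per the macro's name:

```c
#include <stdint.h>

#define LOG2_PAGE_SIZE_4K 12  /* assumed value of ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K */

/* mirrors line 171: keep the heap-offset bits, divide down to a 4 KiB PFN */
static uint32_t addr_to_heap_pfn(uint64_t start, uint64_t offset_mask)
{
	return (uint32_t)((start & offset_mask) >> LOG2_PAGE_SIZE_4K);
}
```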
|
/drivers/xen/
balloon.c
    347  unsigned long start_pfn = page_to_pfn(page);  in xen_online_page() local
    350  pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);  in xen_online_page()
    353  p = pfn_to_page(start_pfn + i);  in xen_online_page()
    684  unsigned long start_pfn, pages;  in balloon_add_regions() local
    693  start_pfn = xen_extra_mem[i].start_pfn;  in balloon_add_regions()
    700  extra_pfn_end = min(max_pfn, start_pfn + pages);  in balloon_add_regions()
    702  for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)  in balloon_add_regions()
    711  pages = extra_pfn_end - start_pfn;  in balloon_add_regions()
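
xen_online_page() converts in both directions: page_to_pfn() fixes the start of the range, and pfn_to_page() recovers each struct page inside the loop. A kernel-style sketch of that shape; consume_page() is a hypothetical stand-in for what the balloon actually does with each page:

```c
#include <linux/mm.h>

/* hypothetical consumer; the Xen balloon hands pages to its free list */
static void consume_page(struct page *p)
{
}

/* same shape as xen_online_page(): PFN arithmetic drives the walk,
 * pfn_to_page() recovers each struct page */
static void sketch_online_range(struct page *first, unsigned long size)
{
	unsigned long start_pfn = page_to_pfn(first);
	unsigned long i;

	for (i = 0; i < size; i++)
		consume_page(pfn_to_page(start_pfn + i));
}
```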
|
grant-table.c
    1062  unsigned long pfn, start_pfn;  in gnttab_dma_alloc_pages() local
    1083  start_pfn = __phys_to_pfn(args->dev_bus_addr);  in gnttab_dma_alloc_pages()
    1084  for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;  in gnttab_dma_alloc_pages()
|
/drivers/iommu/intel/
iommu.c
    803  unsigned long start_pfn,  in dma_pte_clear_range() argument
    810  WARN_ON(start_pfn > last_pfn))  in dma_pte_clear_range()
    818  start_pfn = align_to_level(start_pfn + 1, large_page + 1);  in dma_pte_clear_range()
    830  } while (start_pfn && start_pfn <= last_pfn);  in dma_pte_clear_range()
    838  pfn = max(start_pfn, pfn);  in dma_pte_free_level()
    877  unsigned long start_pfn,  in dma_pte_free_pagetable() argument
    925  pfn = max(start_pfn, pfn);  in dma_pte_clear_level()
    935  if (start_pfn <= level_pfn &&  in dma_pte_clear_level()
    1572  unsigned long start_pfn,  in switch_to_super_page() argument
    1578  while (start_pfn <= end_pfn) {  in switch_to_super_page()
    [all …]
|
/drivers/virt/acrn/
mm.c
    179  unsigned long start_pfn, cur_pfn;  in acrn_vm_ram_map() local
    199  start_pfn = cur_pfn;  in acrn_vm_ram_map()
    218  if (cur_pfn != start_pfn + i) {  in acrn_vm_ram_map()
    232  PFN_PHYS(start_pfn), memmap->len,  in acrn_vm_ram_map()
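
Line 218 is a physical-contiguity test: page i of the region must land exactly at start_pfn + i, or the backing pages do not form one contiguous run. A standalone restatement over a caller-supplied PFN array (a hypothetical input; the real acrn_vm_ram_map() walks pinned pages):

```c
#include <stdbool.h>
#include <stddef.h>

/* true iff pfns[] is one physically contiguous run (cf. line 218) */
static bool pfns_contiguous(const unsigned long *pfns, size_t nr)
{
	size_t i;

	for (i = 1; i < nr; i++)
		if (pfns[i] != pfns[0] + i)
			return false;
	return true;
}
```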
|
/drivers/virtio/
virtio_mem.c
    904  unsigned long start_pfn)  in virtio_mem_sbm_notify_online() argument
    996  const unsigned long start = PFN_PHYS(mhp->start_pfn);  in virtio_mem_memory_notifier_cb()
    1047  mhp->start_pfn,  in virtio_mem_memory_notifier_cb()
    1101  mhp->start_pfn,  in virtio_mem_memory_notifier_cb()
    1982  unsigned long start_pfn;  in virtio_mem_sbm_unplug_sb_online() local
    1985  start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +  in virtio_mem_sbm_unplug_sb_online()
    1988  rc = virtio_mem_fake_offline(vm, start_pfn, nr_pages);  in virtio_mem_sbm_unplug_sb_online()
    1996  virtio_mem_fake_online(start_pfn, nr_pages);  in virtio_mem_sbm_unplug_sb_online()
    2163  unsigned long end_pfn = start_pfn + nr_pages;  in virtio_mem_bbm_offline_remove_and_unplug_bb()
    2231  for (pfn = start_pfn; pfn < start_pfn + nr_pages;  in virtio_mem_bbm_bb_is_offline()
    [all …]
|
/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c
    686  unsigned long pfn, start_pfn, end_pfn, nr_pages;  in ehea_create_busmap_callback() local
    693  start_pfn = initial_pfn;  in ehea_create_busmap_callback()
    695  pfn = start_pfn;  in ehea_create_busmap_callback()
    700  nr_pages = pfn - start_pfn;  in ehea_create_busmap_callback()
    701  ret = ehea_update_busmap(start_pfn, nr_pages,  in ehea_create_busmap_callback()
    708  start_pfn = pfn;  in ehea_create_busmap_callback()
    714  nr_pages = pfn - start_pfn;  in ehea_create_busmap_callback()
    715  return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);  in ehea_create_busmap_callback()
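
ehea_create_busmap_callback() registers memory as maximal contiguous runs: pfn advances while pages qualify, and each finished run [start_pfn, pfn) of nr_pages = pfn - start_pfn pages is flushed before the next run starts (lines 700/701 and 714/715). A standalone sketch of that segmentation; pfn_valid_here() and flush_run() are toy stand-ins, and the real callback walks in section-sized steps rather than single PFNs:

```c
#include <stdbool.h>

static bool pfn_valid_here(unsigned long pfn)  /* toy predicate */
{
	return pfn % 8 != 7;
}

static void flush_run(unsigned long start_pfn, unsigned long nr_pages)
{
	(void)start_pfn;                           /* toy sink */
	(void)nr_pages;
}

/* split [initial_pfn, initial_pfn + total) into maximal valid runs */
static void segment_runs(unsigned long initial_pfn, unsigned long total)
{
	unsigned long pfn = initial_pfn, start_pfn = initial_pfn;
	unsigned long end = initial_pfn + total;

	while (pfn < end) {
		if (!pfn_valid_here(pfn)) {
			if (pfn > start_pfn)       /* flush the finished run */
				flush_run(start_pfn, pfn - start_pfn);
			start_pfn = pfn + 1;       /* restart past the hole */
		}
		pfn++;
	}
	if (pfn > start_pfn)                       /* flush the final run */
		flush_run(start_pfn, pfn - start_pfn);
}
```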
|
ehea_main.c
    3259  if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))  in ehea_mem_notifier()
    3267  if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))  in ehea_mem_notifier()
|
/drivers/of/
kexec.c
    131  unsigned long start_pfn, end_pfn;  in ima_get_kexec_buffer() local
    151  start_pfn = PHYS_PFN(tmp_addr);  in ima_get_kexec_buffer()
    153  if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) {  in ima_get_kexec_buffer()
|
/drivers/iommu/
iova.c
    39  unsigned long start_pfn)  in init_iova_domain() argument
    53  iovad->start_pfn = start_pfn;  in init_iova_domain()
    173  unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;  in __alloc_and_insert_iova_range()
    198  if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {  in __alloc_and_insert_iova_range()
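
Here start_pfn is the allocator's floor: __alloc_and_insert_iova_range() starts its low scan at iovad->start_pfn (line 173), so IOVAs below it are never handed out. A usage sketch with assumed values, based on the three-argument init_iova_domain(iovad, granule, start_pfn) of current kernels:

```c
#include <linux/iova.h>
#include <linux/sizes.h>

static struct iova_domain sketch_iovad;

static void sketch_init(void)
{
	/*
	 * 4 KiB granule; IOVAs below PFN 0x100 (the first 1 MiB of
	 * IOVA space) stay reserved, because the allocator's low_pfn
	 * starts at iovad->start_pfn (see line 173 above).
	 */
	init_iova_domain(&sketch_iovad, SZ_4K, 0x100);
}
```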
|
dma-iommu.c
    690  if (iovad->start_pfn) {  in iommu_dma_init_domain()
    692  base_pfn != iovad->start_pfn) {  in iommu_dma_init_domain()
|
/drivers/vfio/pci/nvgrace-gpu/
main.c
    140  unsigned long start_pfn;  in nvgrace_gpu_mmap() local
    160  check_add_overflow(PHYS_PFN(memregion->memphys), pgoff, &start_pfn) ||  in nvgrace_gpu_mmap()
    198  ret = remap_pfn_range(vma, vma->vm_start, start_pfn,  in nvgrace_gpu_mmap()
    203  vma->vm_pgoff = start_pfn;  in nvgrace_gpu_mmap()
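
The nvgrace-gpu hits show the common device-mmap shape: derive the target PFN from the region's physical base plus the file offset, guard the addition with check_add_overflow(), then map the whole VMA with remap_pfn_range(). A sketch under those assumptions; memphys stands in for the driver's region base:

```c
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/pfn.h>

static int sketch_mmap(struct vm_area_struct *vma, phys_addr_t memphys)
{
	unsigned long start_pfn;

	/* base PFN + page offset into the region, with overflow check */
	if (check_add_overflow(PHYS_PFN(memphys), vma->vm_pgoff, &start_pfn))
		return -EOVERFLOW;

	/* map the entire VMA onto the physical range */
	return remap_pfn_range(vma, vma->vm_start, start_pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
```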
|
/drivers/s390/char/
sclp_cmd.c
    186  int arch_get_memory_phys_device(unsigned long start_pfn)  in arch_get_memory_phys_device() argument
    190  return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);  in arch_get_memory_phys_device()
    333  start = arg->start_pfn << PAGE_SHIFT;  in sclp_mem_notifier()
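
arch_get_memory_phys_device() maps a PFN to an s390 storage-increment number: the physical address divided by the increment size sclp.rzm, a power of two, hence the ilog2() shift. A standalone restatement; __builtin_ctzll() substitutes for the kernel's ilog2() on power-of-two values:

```c
#include <stdint.h>

#define PAGE_SHIFT 12   /* s390 also uses 4 KiB pages */

/* rzm must be a power of two; ctz then matches the kernel's ilog2() */
static unsigned int increment_of_pfn(unsigned long start_pfn, uint64_t rzm)
{
	uint64_t phys = (uint64_t)start_pfn << PAGE_SHIFT;  /* PFN_PHYS() */

	return (unsigned int)(phys >> __builtin_ctzll(rzm));
}
```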
|
/drivers/gpu/drm/gma500/
mmu.h
    71  uint32_t start_pfn,
|
mmu.c
    621  int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,  in psb_mmu_insert_pfn_sequence() argument
    646  pte = psb_mmu_mask_pte(start_pfn++, type);  in psb_mmu_insert_pfn_sequence()
|