/arch/xtensa/kernel/
setup.c:
    219  early_init_dt_scan(params, __pa(params));  in early_init_devtree()
    294  !mem_reserve(__pa(initrd_start), __pa(initrd_end)))  in setup_arch()
    300  mem_reserve(__pa(_stext), __pa(_end));  in setup_arch()
    303  mem_reserve(__pa(_xip_text_start), __pa(_xip_text_end));  in setup_arch()
    305  mem_reserve(__pa(_xip_start), __pa(_xip_end));  in setup_arch()
    311  __pa(_WindowVectors_text_end));  in setup_arch()
    315  __pa(_DebugInterruptVector_text_end));  in setup_arch()
    321  __pa(_UserExceptionVector_text_end));  in setup_arch()
    326  mem_reserve(__pa(_exception_text_start),  in setup_arch()
    327  __pa(_exception_text_end));  in setup_arch()
    [all …]
|
hibernate.c:
    9   unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));  in pfn_is_nosave()
    10  unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));  in pfn_is_nosave()
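Each pfn_is_nosave() variant in this listing (see also the powerpc and sh hits further down) computes the same range: __pa() turns the linker symbols bounding the nosave section into physical addresses, and PFN_DOWN()/PFN_UP() round the start down and the end up to whole page frames. A minimal stand-alone model of that arithmetic, with invented addresses and the PFN helpers written out by hand:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        /* Invented physical addresses for __nosave_begin/__nosave_end. */
        unsigned long nosave_begin_pa = 0x1000c40UL;
        unsigned long nosave_end_pa   = 0x1003180UL;

        /* PFN_DOWN(): truncate down; PFN_UP(): round up to the next frame. */
        unsigned long begin_pfn = nosave_begin_pa >> PAGE_SHIFT;
        unsigned long end_pfn   = (nosave_end_pa + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* pfn_is_nosave(pfn) then reduces to: begin_pfn <= pfn < end_pfn. */
        printf("nosave pfns: [%#lx, %#lx)\n", begin_pfn, end_pfn);
        return 0;
    }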
|
/arch/x86/include/asm/
pgalloc.h:
    65   paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);  in pmd_populate_kernel()
    66   set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));  in pmd_populate_kernel()
    72   paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);  in pmd_populate_kernel_safe()
    99   paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);  in pud_populate()
    100  set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));  in pud_populate()
    105  paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);  in pud_populate_safe()
    113  paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);  in p4d_populate()
    114  set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));  in p4d_populate()
    119  paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);  in p4d_populate_safe()
    136  paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);  in pgd_populate()
    [all …]
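All of these populate helpers share one shape: take the child table's kernel virtual address, convert it with __pa(), OR in _PAGE_TABLE, and install the result in the parent entry (with a paravirt notification of the new table's pfn). A user-space model of that entry encoding — fake_pa() and the flag value are illustrative stand-ins, not the kernel's implementation:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT  12
    #define _PAGE_TABLE 0x067ULL  /* present|rw|user|accessed|dirty; illustrative */

    static uint64_t fake_pa(void *p)
    {
        /* User space has no kernel linear map, so stand in for __pa() by
         * treating the allocation's address as if it were physical. */
        return (uint64_t)(uintptr_t)p;
    }

    int main(void)
    {
        void *pte_table;
        uint64_t pmd_entry;

        /* A real pte page is page-aligned, so its low 12 bits are free for flags. */
        if (posix_memalign(&pte_table, 1UL << PAGE_SHIFT, 1UL << PAGE_SHIFT))
            return 1;

        /* The pmd_populate_kernel() shape: child's physical address | flags. */
        pmd_entry = fake_pa(pte_table) | _PAGE_TABLE;

        printf("pmd entry %#llx -> pte table at pfn %#llx\n",
               (unsigned long long)pmd_entry,
               (unsigned long long)(pmd_entry >> PAGE_SHIFT));
        free(pte_table);
        return 0;
    }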
|
page.h:
    40  #ifndef __pa
    41  #define __pa(x) __phys_addr((unsigned long)(x))  (macro)
    62  #define __boot_pa(x) __pa(x)
    68  #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
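Lines 41 and 68 are the core contract: __pa() maps a kernel virtual address to a physical one, and __pa(kaddr) >> PAGE_SHIFT is the pfn handed to pfn_to_page(). A sketch of the direct-map case only, assuming a constant linear-map base (the real x86-64 __phys_addr() also handles addresses in the separate kernel-text mapping, which this ignores):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_OFFSET 0xffff888000000000ULL  /* x86-64 direct-map base, 4-level */

    /* Direct-map half of __phys_addr(): virt minus the linear-map base. */
    static uint64_t pa(uint64_t vaddr) { return vaddr - PAGE_OFFSET; }

    int main(void)
    {
        uint64_t kaddr = PAGE_OFFSET + 0x12345000ULL; /* hypothetical kaddr */
        uint64_t phys  = pa(kaddr);

        /* virt_to_page(kaddr) is pfn_to_page(__pa(kaddr) >> PAGE_SHIFT). */
        printf("virt %#llx -> phys %#llx -> pfn %#llx\n",
               (unsigned long long)kaddr, (unsigned long long)phys,
               (unsigned long long)(phys >> PAGE_SHIFT));
        return 0;
    }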
|
/arch/parisc/kernel/
firmware.c:
    162   __pa(pdc_result), 0);  in set_firmware_width_unlocked()
    275   __pa(pdc_result), __pa(pdc_result2), len);  in pdc_chassis_info()
    366   __pa(pdc_result));  in pdc_pat_get_PDC_entrypoint()
    642   __pa(orig_prod_num), __pa(current_prod_num), __pa(serial_no));  in pdc_model_platform_info()
    942   __pa(pdc_result), __pa(hwpath));  in pdc_get_initiator()
    1025  __pa(pdc_result), hpa, __pa(tbl));  in pdc_pci_irt()
    1387  __pa(pdc_result), 0, __pa(iodc_dbuf), i, 0);  in pdc_iodc_print()
    1415  __pa(pdc_result), 0, __pa(iodc_dbuf), 1, 0);  in pdc_iodc_getc()
    1521  __pa(pdc_result), __pa(&result), *actcnt,  in pdc_pat_cell_info()
    1634  __pa(pdc_result));  in pdc_pat_pd_get_pdc_revisions()
    [all …]
|
/arch/csky/kernel/
setup.c:
    25   if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {  in setup_initrd()
    32   if (memblock_is_region_reserved(__pa(initrd_start), size)) {  in setup_initrd()
    34   __pa(initrd_start), size);  in setup_initrd()
    38   memblock_reserve(__pa(initrd_start), size);  in setup_initrd()
    61   memblock_reserve(__pa(_start), _end - _start);  in csky_memblock_init()
    158  early_init_dt_scan(__dtb_start, __pa(dtb_start));  in csky_start()
    160  early_init_dt_scan(dtb_start, __pa(dtb_start));  in csky_start()
|
/arch/arm/mach-omap2/
omap-secure.c:
    81   outer_clean_range(__pa(param), __pa(param + 5));  in omap_secure_dispatcher()
    82   ret = omap_smc2(idx, flag, __pa(param));  in omap_secure_dispatcher()
    131  param[1] = __pa(addr); /* Physical address for saving */  in omap3_save_secure_ram()
    136  ret = save_secure_ram_context(__pa(param));  in omap3_save_secure_ram()
    174  outer_clean_range(__pa(param), __pa(param + 5));  in rx51_secure_dispatcher()
    175  ret = omap_smc3(idx, process, flag, __pa(param));  in rx51_secure_dispatcher()
|
/arch/x86/power/
hibernate_64.c:
    71   __pud(__pa(pmd) | pgprot_val(pgtable_prot)));  in set_up_temporary_text_mapping()
    73   p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));  in set_up_temporary_text_mapping()
    74   pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));  in set_up_temporary_text_mapping()
    80   pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));  in set_up_temporary_text_mapping()
    123  temp_pgt = __pa(pgd);  in set_up_temporary_mappings()
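These hits build a temporary page-table tree for resume: each freshly allocated lower-level table is installed into its parent as __pa(table) | prot, and the finished root is published as a physical address (temp_pgt = __pa(pgd)) because that is what the MMU consumes. A toy model of that chaining — the allocator and protection bits here are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define PROT 0x63ULL  /* illustrative; the kernel derives pgtable_prot at runtime */

    /* Toy allocator: consecutive page-aligned "physical" table addresses,
     * standing in for get_safe_page() + __pa(). */
    static uint64_t next_pa = 0x100000;
    static uint64_t alloc_pa(void) { uint64_t t = next_pa; next_pa += 0x1000; return t; }

    int main(void)
    {
        uint64_t pmd_pa = alloc_pa();
        uint64_t pud_pa = alloc_pa();
        uint64_t p4d_pa = alloc_pa();
        uint64_t pgd_pa = alloc_pa();

        /* Chain the levels: each entry is the child's physical address | prot,
         * mirroring __pud(__pa(pmd) | ...), __p4d(__pa(pud) | ...), and so on. */
        uint64_t pud_entry = pmd_pa | PROT;
        uint64_t p4d_entry = pud_pa | PROT;
        uint64_t pgd_entry = p4d_pa | PROT;

        /* temp_pgt = __pa(pgd): the root handed to the MMU is physical. */
        printf("root %#llx: pgd %#llx p4d %#llx pud %#llx\n",
               (unsigned long long)pgd_pa, (unsigned long long)pgd_entry,
               (unsigned long long)p4d_entry, (unsigned long long)pud_entry);
        return 0;
    }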
|
/arch/um/kernel/
physmem.c:
    90   os_seek_file(physmem_fd, __pa(__syscall_stub_start));  in setup_physmem()
    93   memblock_add(__pa(start), len);  in setup_physmem()
    94   memblock_reserve(__pa(start), reserve);  in setup_physmem()
    96   min_low_pfn = PFN_UP(__pa(reserve_end));  in setup_physmem()
    108  else if (phys < __pa(end_iomem)) {  in phys_mapping()
    190  region->phys = __pa(region->virt);  in setup_iomem()
|
mem.c:
    68   map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);  in arch_mm_preinit()
    71   min_low_pfn = PFN_UP(__pa(uml_reserved));  in arch_mm_preinit()
    95   (unsigned long) __pa(pte)));  in one_page_table_init()
    108  set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));  in one_md_table_init()
    121  set_p4d(p4d, __p4d(_KERNPG_TABLE + (unsigned long) __pa(pud_table)));  in one_ud_table_init()
    174  p = __pa(v);  in fixaddr_user_init()
|
/arch/powerpc/mm/nohash/
8xx.c:
    33   return __pa(va);  in v_block_mapped()
    145  unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);  in mmu_mapin_ram()
    146  unsigned long sinittext = __pa(_sinittext);  in mmu_mapin_ram()
    149  unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);  in mmu_mapin_ram()
    173  unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);  in mmu_mark_initmem_nx()
    174  unsigned long sinittext = __pa(_sinittext);  in mmu_mark_initmem_nx()
    176  unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);  in mmu_mark_initmem_nx()
    191  unsigned long sinittext = __pa(_sinittext);  in mmu_mark_rodata_ro()
|
/arch/loongarch/mm/
kasan_init.c:
    20   (__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
    27   (__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
    34   (__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
    38   (__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))
    41   ((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
    110  __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);  in kasan_alloc_zeroed_page()
    113  __func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));  in kasan_alloc_zeroed_page()
    115  return __pa(p);  in kasan_alloc_zeroed_page()
|
/arch/riscv/mm/
kasan_init.c:
    36   set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));  in kasan_populate_pte()
    58   set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));  in kasan_populate_pmd()
    89   set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));  in kasan_populate_pud()
    120  set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));  in kasan_populate_p4d()
    254  phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);  in kasan_early_populate_pud()
    292  phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);  in kasan_early_populate_p4d()
    336  (__pa((uintptr_t)kasan_early_shadow_pte)),  in kasan_early_init()
    343  (__pa(((uintptr_t)kasan_early_shadow_pmd))),  in kasan_early_init()
    351  (__pa(((uintptr_t)kasan_early_shadow_pud))),  in kasan_early_init()
    468  pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));  in create_tmp_mapping()
    [all …]
|
/arch/arm/mm/
pmsa-v8.c:
    255  subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END));  in pmsav8_setup()
    256  subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END));  in pmsav8_setup()
    260  subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));  in pmsav8_setup()
    261  subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));  in pmsav8_setup()
    277  err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));  in pmsav8_setup()
    280  err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END));  in pmsav8_setup()
|
/arch/x86/kernel/
machine_kexec_32.c:
    89   set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));  in machine_kexec_page_table_set_one()
    95   set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));  in machine_kexec_page_table_set_one()
    111  (unsigned long)control_page, __pa(control_page));  in machine_kexec_prepare_page_tables()
    117  __pa(control_page), __pa(control_page));  in machine_kexec_prepare_page_tables()
    196  page_list[PA_CONTROL_PAGE] = __pa(control_page);  in machine_kexec()
    198  page_list[PA_PGD] = __pa(image->arch.pgd);  in machine_kexec()
|
espfix_64.c:
    168  pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));  in init_espfix_ap()
    169  paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);  in init_espfix_ap()
    180  pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));  in init_espfix_ap()
    181  paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);  in init_espfix_ap()
    192  pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));  in init_espfix_ap()
|
/arch/hexagon/include/asm/
page.h:
    83   #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)  (macro)
    90   #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))
    95   #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
    121  return __pa(kaddr) >> PAGE_SHIFT;  in virt_to_pfn()
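Unlike x86's out-of-line __phys_addr(), hexagon's __pa() on line 83 is pure offset arithmetic: subtract the virtual base, add the physical one. Worked through with made-up bases (the real PAGE_OFFSET/PHYS_OFFSET are platform-specific):

    #include <stdio.h>

    #define PAGE_SHIFT  12
    /* Hypothetical bases chosen only for the example. */
    #define PAGE_OFFSET 0xc0000000UL
    #define PHYS_OFFSET 0x40000000UL

    /* Same shape as hexagon's __pa(): rebase from virtual to physical. */
    #define my_pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)

    int main(void)
    {
        unsigned long kaddr = 0xc0123000UL;
        /* virt_to_pfn(kaddr) is __pa(kaddr) >> PAGE_SHIFT, as on line 121. */
        printf("virt %#lx -> phys %#lx -> pfn %#lx\n",
               kaddr, my_pa(kaddr), my_pa(kaddr) >> PAGE_SHIFT);
        return 0;
    }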
|
pgalloc.h:
    39  mm->context.ptbase = __pa(pgd);  in pgd_alloc()
    77  set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));  in pmd_populate_kernel()
    85  set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));  in pmd_populate_kernel()
|
/arch/parisc/mm/
init.c:
    287  memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),  in setup_bootmem()
    303  if (__pa(initrd_start) < mem_max) {  in setup_bootmem()
    306  if (__pa(initrd_end) > mem_max) {  in setup_bootmem()
    307  initrd_reserve = mem_max - __pa(initrd_start);  in setup_bootmem()
    312  printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initr…  in setup_bootmem()
    360  ro_start = __pa((unsigned long)_text);  in map_pages()
    361  ro_end = __pa((unsigned long)&data_start);  in map_pages()
    362  kernel_start = __pa((unsigned long)&__init_begin);  in map_pages()
    363  kernel_end = __pa((unsigned long)&_end);  in map_pages()
    443  map_pages(start, __pa(start), end-start,  in set_kernel_text_rw()
    [all …]
|
/arch/powerpc/platforms/pseries/
svm.c:
    49  uv_unshare_page(PHYS_PFN(__pa(addr)), numpages);  in set_memory_encrypted()
    62  uv_share_page(PHYS_PFN(__pa(addr)), numpages);  in set_memory_decrypted()
    86  unsigned long pfn = PHYS_PFN(__pa(addr));  in dtl_cache_ctor()
|
/arch/x86/xen/
p2m.c:
    376  pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));  in xen_rebuild_p2m_list()
    386  pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));  in xen_rebuild_p2m_list()
    396  set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));  in xen_rebuild_p2m_list()
    445  if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))  in get_phys_to_machine()
    495  __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));  in alloc_p2m_pmd()
    575  if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||  in xen_alloc_p2m_entry()
    576  p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {  in xen_alloc_p2m_entry()
    584  if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))  in xen_alloc_p2m_entry()
    595  pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));  in xen_alloc_p2m_entry()
    658  if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))  in __set_phys_to_machine()
    [all …]
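A recurring idiom in p2m.c: rather than inspecting a page's contents, the code asks whether a pte still points at a shared placeholder page by comparing frame numbers, as in pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)). A stand-alone sketch of that identity test, with an invented placeholder address:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Invented physical address of the shared "missing" placeholder page. */
    static const uint64_t p2m_missing_pa = 0x200000;

    /* Does this pte still map the shared placeholder? Comparing pfns is
     * enough because the placeholder is a single, well-known page. */
    static bool maps_placeholder(uint64_t pte_pfn)
    {
        return pte_pfn == (p2m_missing_pa >> PAGE_SHIFT);
    }

    int main(void)
    {
        printf("%d\n", maps_placeholder(p2m_missing_pa >> PAGE_SHIFT)); /* 1 */
        printf("%d\n", maps_placeholder(0x300));                        /* 0 */
        return 0;
    }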
|
/arch/powerpc/kernel/
suspend.c:
    20  unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;  in pfn_is_nosave()
    21  unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;  in pfn_is_nosave()
|
/arch/s390/mm/
pgalloc.c:
    87   mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |  in crst_table_upgrade()
    96   mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |  in crst_table_upgrade()
    278  *ste = __pa(table) | _SEGMENT_ENTRY;  in base_segment_walk()
    307  *rtte = __pa(table) | _REGION3_ENTRY;  in base_region3_walk()
    335  *rste = __pa(table) | _REGION2_ENTRY;  in base_region2_walk()
    363  *rfte = __pa(table) | _REGION1_ENTRY;  in base_region1_walk()
    447  asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;  in base_asce_alloc()
    453  asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;  in base_asce_alloc()
    459  asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;  in base_asce_alloc()
    465  asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;  in base_asce_alloc()
|
/arch/powerpc/include/asm/
page.h:
    190  #define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)  (macro)
    207  #define __pa(x) \  (macro)
    215  #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)  (macro)
    222  return __pa(kaddr) >> PAGE_SHIFT;  in virt_to_pfn()
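powerpc keeps three config-dependent definitions of __pa(). Under the assumption that VIRT_PHYS_OFFSET is simply the constant gap between the virtual and physical bases (its actual definition varies by config), the line 190 and line 215 forms compute the same value, which this small check demonstrates:

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical bases (LP64 assumed); the real values are config-dependent. */
    #define PAGE_OFFSET  0xc000000000000000UL
    #define MEMORY_START 0x0UL
    /* One reading of line 190's offset: the constant gap between the bases. */
    #define VIRT_PHYS_OFFSET (PAGE_OFFSET - MEMORY_START)

    int main(void)
    {
        unsigned long x = PAGE_OFFSET + 0x2000UL;
        /* When the offset is constant, the two formulations agree. */
        assert(x - VIRT_PHYS_OFFSET == x - PAGE_OFFSET + MEMORY_START);
        printf("__pa(%#lx) = %#lx\n", x, x - VIRT_PHYS_OFFSET);
        return 0;
    }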
|
/arch/sh/kernel/
swsusp.c:
    21  unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;  in pfn_is_nosave()
    22  unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;  in pfn_is_nosave()
|