/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#include <linux/pfn.h>

#ifndef __ASSEMBLY__

/*
 * supports 3 memory models.
 */
#if defined(CONFIG_FLATMEM)

#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)

#ifndef pfn_valid
static inline int pfn_valid(unsigned long pfn)
{
	/* avoid <linux/mm.h> include hell */
	extern unsigned long max_mapnr;
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
}
#define pfn_valid pfn_valid
#endif

#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous. */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)

#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
#define __page_to_pfn(pg)					\
({	const struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
})

#define __pfn_to_page(pfn)				\
({	unsigned long __pfn = (pfn);			\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;		\
})
#endif /* CONFIG_FLATMEM/SPARSEMEM */

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define __phys_to_pfn(paddr)	PHYS_PFN(paddr)
#define __pfn_to_phys(pfn)	PFN_PHYS(pfn)

#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page

#endif /* __ASSEMBLY__ */

#endif
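
/*
 * Illustrative sketch only (memory_model_example() is a hypothetical helper,
 * not part of this header): shows how the conversions above compose.
 * page_to_pfn()/pfn_to_page() resolve to the model-specific macros defined
 * here, while __pfn_to_phys()/__phys_to_pfn() shift by PAGE_SHIFT via the
 * PFN_PHYS()/PHYS_PFN() helpers from <linux/pfn.h>.
 */
#if 0	/* sketch, assuming a kernel build context with <linux/mm.h> available */
#include <linux/mm.h>

static void memory_model_example(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);	/* struct page -> page frame number */
	phys_addr_t phys = __pfn_to_phys(pfn);	/* PFN -> physical address */

	/* Round trip: converting back recovers the original struct page. */
	WARN_ON(pfn_to_page(__phys_to_pfn(phys)) != page);
}
#endif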