// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

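/*
 * Round @addr up to the shm colouring boundary, then add the colour
 * offset implied by @pgoff, so that mappings of the same file offset
 * always share a cache colour.
 */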
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};
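
/*
 * Common helper for arch_get_unmapped_area() and its topdown variant:
 * find a free range of @len bytes, honouring cache colouring, searching
 * bottom-up (UP) or top-down (DOWN) according to @dir.
 */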
19
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

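	/*
	 * Colour-align file-backed and shared mappings: mappings of the
	 * same object at the same page offset must not alias in a
	 * virtually-indexed cache.
	 */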
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

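	/*
	 * PAGE_MASK & shm_align_mask keeps only the colour bits above
	 * the in-page offset; vm_unmapped_area() returns an address that
	 * agrees with align_offset in those bits.
	 */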
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
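		/* low_limit of PAGE_SIZE keeps the NULL page unmapped */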
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

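/*
 * Bottom-up search, used when the legacy mmap layout is in effect.
 */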
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this, but sched.h declares the function as
 * extern, so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

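/*
 * Only addresses in [PAGE_OFFSET, vm_map_base), i.e. the linear map,
 * can be valid here; the backing pfn must exist as well.
 */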
int __virt_addr_valid(volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
		return 0;

	return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
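	/* The mapped range must end within the CPU's physical address space */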
	return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
}