// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

void bad_page_fault(struct pt_regs*, unsigned long, int);

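/* Fault on a kernel (vmalloc/module) address: a valid translation lives
 * only in the reference page table (init_mm.pgd), so copy the relevant
 * entries into the faulting task's page table, or fail hard.
 */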
static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
#ifdef CONFIG_MMU
	/* Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	struct mm_struct *act_mm = current->active_mm;
	int index = pgd_index(address);
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;

	if (act_mm == NULL)
		goto bad_page_fault;

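	/* Look up the top-level entries for this address in both the
	 * active and the reference (kernel) page tables.
	 */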
	pgd = act_mm->pgd + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		goto bad_page_fault;

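	/* Copy the kernel's top-level entry into the active page table
	 * so that future walks of this task's tables find the mapping.
	 */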
	pgd_val(*pgd) = pgd_val(*pgd_k);

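	/* Walk the remaining levels of both tables in lockstep; a level
	 * missing from the reference table means the access is invalid.
	 */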
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
		goto bad_page_fault;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud) || !pud_present(*pud_k))
		goto bad_page_fault;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
		goto bad_page_fault;

	pmd_val(*pmd) = pmd_val(*pmd_k);
	pte_k = pte_offset_kernel(pmd_k, address);

	if (!pte_present(*pte_k))
		goto bad_page_fault;
	return;

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
#else
	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
#endif
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

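	/* Assume "address not mapped" until a vma is found; the code is
	 * refined to SEGV_ACCERR once a mapping exists but lacks the
	 * required permissions.
	 */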
	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs)) {
		vmalloc_fault(regs, address);
		return;
	}

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

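	/* Classify the access from the hardware exception cause: the
	 * store cache-attribute exception is a write, the ITLB-related
	 * causes are instruction fetches, everything else is a read.
	 */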
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

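	/* Look up and handle the fault under the mmap read lock; the
	 * fault handler may drop the lock and ask us to retry.
	 */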
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

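	/* find_vma() returns the first vma that ends above the fault
	 * address; if the address lies below vma->vm_start, only a
	 * growable (stack) mapping can make the access legitimate.
	 */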
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

good_area:
	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			bad_page_fault(regs, address, SIGKILL);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

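	/* Map fatal fault results onto the matching signal paths below;
	 * an unknown error bit indicates a kernel bug.
	 */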
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	entry = search_exception_tables(regs->pc);
	if (entry) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}