/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include "hf/arch/barriers.h"
#include "hf/arch/vm/mm.h"

#include "hf/dlog.h"

#include "../msr.h"

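/*
 * Indices into mair_el1 for the memory types used by the stage 1 mappings
 * (referenced by the AttrIndx field of the page table entries).
 */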
#define STAGE1_DEVICEINDX UINT64_C(0)
#define STAGE1_NORMALINDX UINT64_C(1)

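/*
 * Register values computed by arch_vm_mm_init() and written to the EL1
 * system registers by arch_vm_mm_enable().
 */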
static uintreg_t mm_mair_el1;
static uintreg_t mm_tcr_el1;
static uintreg_t mm_sctlr_el1;

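/*
 * Original register values, saved by arch_vm_mm_init() so that
 * arch_vm_mm_reset() can restore them.
 */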
static uintreg_t mm_reset_ttbr0_el1;
static uintreg_t mm_reset_mair_el1;
static uintreg_t mm_reset_tcr_el1;
static uintreg_t mm_reset_sctlr_el1;

/* For hftest, limit Stage1 PA range to 512GB (1 << 39). */
#define HFTEST_S1_PA_BITS (39)

/**
 * Initialize MMU for a test running in EL1.
 */
bool arch_vm_mm_init(void)
{
	uint64_t features = read_msr(id_aa64mmfr0_el1);
	uint32_t pa_bits = arch_mm_get_pa_range();

	/* Check that 4KB granules are supported. */
	if (((features >> 28) & 0xf) == 0xf) {
		dlog_error("4KB granules are not supported\n");
		return false;
	}

	/* Check the physical address range. */
	if (!pa_bits) {
		dlog_error(
			"Unsupported value of id_aa64mmfr0_el1.PARange: %x\n",
			features & 0xf);
		return false;
	}

	/*
	 * Limit PA bits to HFTEST_S1_PA_BITS. Using the full pa_bits reported
	 * by arch_mm_get_pa_range would require a larger page pool.
	 */
	arch_mm_stage1_max_level_set(HFTEST_S1_PA_BITS);

	/*
	 * Preserve initial values of the system registers in case we want to
	 * reset them.
	 */
	mm_reset_ttbr0_el1 = read_msr(ttbr0_el1);
	mm_reset_mair_el1 = read_msr(mair_el1);
	mm_reset_tcr_el1 = read_msr(tcr_el1);
	mm_reset_sctlr_el1 = read_msr(sctlr_el1);

	/*
	 * 0    -> Device-nGnRnE memory
	 * 0xff -> Normal memory, Inner/Outer Write-Back Non-transient,
	 *         Write-Alloc, Read-Alloc.
	 */
	mm_mair_el1 = (0 << (8 * STAGE1_DEVICEINDX)) |
		      (0xff << (8 * STAGE1_NORMALINDX));

	mm_tcr_el1 = (UINT64_C(1) << 37) |	/* TBI0, top byte ignored. */
		     ((features & 0xf) << 32) | /* IPS, from PARange. */
		     (0 << 14) |		/* TG0, granule size, 4KB. */
		     (3 << 12) |		/* SH0, inner shareable. */
		     (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
		     (1 << 8) |	 /* IRGN0, normal mem, WB RA WA Cacheable. */
		     (64 - HFTEST_S1_PA_BITS) | /* T0SZ, 2^hftest_s1_pa_bits */
		     0;

	mm_sctlr_el1 = (1 << 0) |  /* M, enable stage 1 EL1&0 MMU. */
		       (1 << 2) |  /* C, data cache enable. */
		       (1 << 3) |  /* SA, enable stack alignment check. */
		       (3 << 4) |  /* RES1 bits. */
		       (1 << 11) | /* RES1 bit. */
		       (1 << 12) | /* I, instruction cache enable. */
		       (1 << 16) | /* RES1 bit. */
		       (1 << 18) | /* RES1 bit. */
		       (0 << 19) | /* WXN bit, writable execute never. */
		       (3 << 22) | /* RES1 bits. */
		       (3 << 28) | /* RES1 bits. */
		       0;

	return true;
}

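/**
 * Point ttbr0_el1 at the given root table and enable the MMU and caches
 * using the values prepared by arch_vm_mm_init().
 */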
void arch_vm_mm_enable(paddr_t table)
{
	/* Configure translation management registers. */
	write_msr(ttbr0_el1, pa_addr(table));
	write_msr(mair_el1, mm_mair_el1);
	write_msr(tcr_el1, mm_tcr_el1);

	/* Configure sctlr_el1 to enable MMU and cache. */
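	/*
	 * The dsb completes any outstanding writes to the page tables and the
	 * isb synchronizes the register updates above; the second isb ensures
	 * the new sctlr_el1 value takes effect before the next instruction.
	 */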
	dsb(sy);
	isb();
	write_msr(sctlr_el1, mm_sctlr_el1);
	isb();
}

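/**
 * Restore the EL1 memory management registers to the values they held before
 * arch_vm_mm_enable() was called.
 */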
void arch_vm_mm_reset(void)
{
	/* Set system registers to their reset values. */
	write_msr(ttbr0_el1, mm_reset_ttbr0_el1);
	write_msr(mair_el1, mm_reset_mair_el1);
	write_msr(tcr_el1, mm_reset_tcr_el1);

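	/*
	 * As in arch_vm_mm_enable, synchronize before writing sctlr_el1 so
	 * that any change to the MMU enable bit takes effect with the
	 * restored translation registers already in place.
	 */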
	dsb(sy);
	isb();
	write_msr(sctlr_el1, mm_reset_sctlr_el1);
	isb();
}
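
/*
 * Minimal usage sketch (illustrative only, not taken from the hftest
 * sources; root_table is a hypothetical paddr_t for the caller's stage 1
 * root table, built with the hf/mm.h API):
 *
 *	if (!arch_vm_mm_init()) {
 *		... handle the failure ...
 *	}
 *	arch_vm_mm_enable(root_table);
 *	... code that relies on the stage 1 mappings ...
 *	arch_vm_mm_reset();
 */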