// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2014 Google, Inc
 *
 * Memory Type Range Registers - these are used to tell the CPU whether
 * memory is cacheable and if so the cache write mode to use.
 *
 * These can speed up booting. See the mtrr command.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3:
 * System Programming
 */

/*
 * Note that any console output (e.g. debug()) in this file will likely fail
 * since the MTRR registers are sometimes in flux.
 */
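
/*
 * Typical call sequence (an illustrative sketch, not a fixed contract):
 * board code queues up regions with mtrr_add_request() and then applies
 * them all at once with mtrr_commit(), e.g. to mark the first 1GiB of
 * RAM as write-back cacheable:
 *
 *	mtrr_add_request(MTRR_TYPE_WRBACK, 0, 1ULL << 30);
 *	mtrr_commit(true);
 */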

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <sort.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/mp.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <linux/log2.h>

DECLARE_GLOBAL_DATA_PTR;

/* Prepare to adjust MTRRs */
void mtrr_open(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	if (do_caches) {
		state->enable_cache = dcache_status();

		if (state->enable_cache)
			disable_caches();
	}
	state->deftype = native_read_msr(MTRR_DEF_TYPE_MSR);
	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype & ~MTRR_DEF_TYPE_EN);
}

/* Clean up after adjusting MTRRs, and enable them */
void mtrr_close(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype | MTRR_DEF_TYPE_EN);
	if (do_caches && state->enable_cache)
		enable_caches();
}
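
/*
 * A minimal sketch of the bracketing pattern these two helpers provide
 * (this mirrors what mtrr_write_all() and mtrr_start_op() below do):
 *
 *	struct mtrr_state state;
 *
 *	mtrr_open(&state, true);	// disable caches and MTRRs
 *	// ... write the MTRR_PHYS_BASE/MASK MSRs here ...
 *	mtrr_close(&state, true);	// re-enable MTRRs, restore caches
 */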

static void set_var_mtrr(uint reg, uint type, uint64_t start, uint64_t size)
{
	u64 mask;

	wrmsrl(MTRR_PHYS_BASE_MSR(reg), start | type);
	mask = ~(size - 1);
	mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
	wrmsrl(MTRR_PHYS_MASK_MSR(reg), mask | MTRR_PHYS_MASK_VALID);
}
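
/*
 * Worked example of the mask encoding above, assuming 36 physical address
 * bits: for a 256MiB region, ~(size - 1) = ~0xfffffff, which truncated to
 * 36 bits gives a mask of 0xff0000000, so the MSR pair matches any address
 * whose top bits equal those of the base. This is why the size must be a
 * power of two and the base must be aligned to the size.
 */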

void mtrr_read_all(struct mtrr_info *info)
{
	int reg_count = mtrr_get_var_count();
	int i;

	for (i = 0; i < reg_count; i++) {
		info->mtrr[i].base = native_read_msr(MTRR_PHYS_BASE_MSR(i));
		info->mtrr[i].mask = native_read_msr(MTRR_PHYS_MASK_MSR(i));
	}
}

void mtrr_write_all(struct mtrr_info *info)
{
	int reg_count = mtrr_get_var_count();
	struct mtrr_state state;
	int i;

	for (i = 0; i < reg_count; i++) {
		mtrr_open(&state, true);
		wrmsrl(MTRR_PHYS_BASE_MSR(i), info->mtrr[i].base);
		wrmsrl(MTRR_PHYS_MASK_MSR(i), info->mtrr[i].mask);
		mtrr_close(&state, true);
	}
}

static void write_mtrrs(void *arg)
{
	struct mtrr_info *info = arg;

	mtrr_write_all(info);
}

static void read_mtrrs(void *arg)
{
	struct mtrr_info *info = arg;

	mtrr_read_all(info);
}

/**
 * mtrr_copy_to_aps() - Copy the MTRRs from the boot CPU to other CPUs
 *
 * Return: 0 on success, -ve on failure
 */
static int mtrr_copy_to_aps(void)
{
	struct mtrr_info info;
	int ret;

	ret = mp_run_on_cpus(MP_SELECT_BSP, read_mtrrs, &info);
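	/* -ENXIO suggests MP support is unavailable, so there are no APs */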
	if (ret == -ENXIO)
		return 0;
	else if (ret)
		return log_msg_ret("bsp", ret);

	ret = mp_run_on_cpus(MP_SELECT_APS, write_mtrrs, &info);
	if (ret)
		return log_msg_ret("aps", ret);

	return 0;
}

static int h_comp_mtrr(const void *p1, const void *p2)
{
	const struct mtrr_request *req1 = p1;
	const struct mtrr_request *req2 = p2;

	s64 diff = req1->start - req2->start;

	return diff < 0 ? -1 : diff > 0 ? 1 : 0;
}

int mtrr_commit(bool do_caches)
{
	struct mtrr_request *req = gd->arch.mtrr_req;
	struct mtrr_state state;
	int ret;
	int i;

	debug("%s: enabled=%d, count=%d\n", __func__, gd->arch.has_mtrr,
	      gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	debug("open\n");
	mtrr_open(&state, do_caches);
	debug("open done\n");
	qsort(req, gd->arch.mtrr_req_count, sizeof(*req), h_comp_mtrr);
	for (i = 0; i < gd->arch.mtrr_req_count; i++, req++)
		mtrr_set_next_var(req->type, req->start, req->size);

	debug("close\n");
	mtrr_close(&state, do_caches);
	debug("mtrr done\n");

	if (gd->flags & GD_FLG_RELOC) {
		ret = mtrr_copy_to_aps();
		if (ret)
			return log_msg_ret("copy", ret);
	}

	return 0;
}

int mtrr_add_request(int type, uint64_t start, uint64_t size)
{
	struct mtrr_request *req;
	uint64_t mask;

	debug("%s: count=%d\n", __func__, gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	if (!is_power_of_2(size))
		return -EINVAL;

	if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
		return -ENOSPC;
	req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
	req->type = type;
	req->start = start;
	req->size = size;
	debug("%d: type=%d, %08llx  %08llx\n", gd->arch.mtrr_req_count - 1,
	      req->type, req->start, req->size);
	mask = ~(req->size - 1);
	mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
	mask |= MTRR_PHYS_MASK_VALID;
	debug("   %016llx %016llx\n", req->start | req->type, mask);

	return 0;
}

int mtrr_get_var_count(void)
{
	return msr_read(MSR_MTRR_CAP_MSR).lo & MSR_MTRR_CAP_VCNT;
}

static int get_free_var_mtrr(void)
{
	struct msr_t maskm;
	int vcnt;
	int i;

	vcnt = mtrr_get_var_count();

	/* Identify the first var mtrr which is not valid */
	for (i = 0; i < vcnt; i++) {
		maskm = msr_read(MTRR_PHYS_MASK_MSR(i));
		if ((maskm.lo & MTRR_PHYS_MASK_VALID) == 0)
			return i;
	}

	/* No free var mtrr */
	return -ENOSPC;
}

int mtrr_set_next_var(uint type, uint64_t start, uint64_t size)
{
	int mtrr;

	if (!is_power_of_2(size))
		return -EINVAL;

	mtrr = get_free_var_mtrr();
	if (mtrr < 0)
		return mtrr;

	set_var_mtrr(mtrr, type, start, size);
	debug("MTRR %x: start=%x, size=%x\n", mtrr, (uint)start, (uint)size);

	return 0;
}

/** enum mtrr_opcode - supported operations for mtrr_do_oper() */
enum mtrr_opcode {
	MTRR_OP_SET,
	MTRR_OP_SET_VALID,
};

/**
 * struct mtrr_oper - An MTRR operation to perform on a CPU
 *
 * @opcode: Indicates operation to perform
 * @reg: MTRR reg number to select (0-7, -1 = all)
 * @valid: Valid value to write for MTRR_OP_SET_VALID
 * @base: Base value to write for MTRR_OP_SET
 * @mask: Mask value to write for MTRR_OP_SET
 */
struct mtrr_oper {
	enum mtrr_opcode opcode;
	int reg;
	bool valid;
	u64 base;
	u64 mask;
};

static void mtrr_do_oper(void *arg)
{
	struct mtrr_oper *oper = arg;
	u64 mask;

	switch (oper->opcode) {
	case MTRR_OP_SET_VALID:
		mask = native_read_msr(MTRR_PHYS_MASK_MSR(oper->reg));
		if (oper->valid)
			mask |= MTRR_PHYS_MASK_VALID;
		else
			mask &= ~MTRR_PHYS_MASK_VALID;
		wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), mask);
		break;
	case MTRR_OP_SET:
		wrmsrl(MTRR_PHYS_BASE_MSR(oper->reg), oper->base);
		wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), oper->mask);
		break;
	}
}

static int mtrr_start_op(int cpu_select, struct mtrr_oper *oper)
{
	struct mtrr_state state;
	int ret;

	mtrr_open(&state, true);
	ret = mp_run_on_cpus(cpu_select, mtrr_do_oper, oper);
	mtrr_close(&state, true);
	if (ret)
		return log_msg_ret("run", ret);

	return 0;
}

int mtrr_set_valid(int cpu_select, int reg, bool valid)
{
	struct mtrr_oper oper;

	oper.opcode = MTRR_OP_SET_VALID;
	oper.reg = reg;
	oper.valid = valid;

	return mtrr_start_op(cpu_select, &oper);
}

int mtrr_set(int cpu_select, int reg, u64 base, u64 mask)
{
	struct mtrr_oper oper;

	oper.opcode = MTRR_OP_SET;
	oper.reg = reg;
	oper.base = base;
	oper.mask = mask;

	return mtrr_start_op(cpu_select, &oper);
}
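
/*
 * Example use of the helpers above (a sketch; MP_SELECT_ALL comes from
 * asm/mp.h and selects every online CPU): temporarily disable variable
 * MTRR 2 on all CPUs, then re-enable it:
 *
 *	mtrr_set_valid(MP_SELECT_ALL, 2, false);
 *	// ... the region falls back to the default memory type ...
 *	mtrr_set_valid(MP_SELECT_ALL, 2, true);
 */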