// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2014 Google, Inc
 * Portions added from coreboot
 *
 * Memory Type Range Registers - these are used to tell the CPU whether
 * memory is cacheable and, if so, the cache write mode to use.
 *
 * These can speed up booting. See the mtrr command.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3:
 * System Programming
 */

/*
 * Note that any console output (e.g. debug()) in this file will likely fail
 * since the MTRR registers are sometimes in flux.
 */

#include <cpu.h>
#include <cpu_func.h>
#include <log.h>
#include <sort.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/mp.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <linux/log2.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Printable names for the MTRR memory types. Entries 2 and 3 are reserved
 * type values with no meaning. Each name starts with a distinct letter,
 * which mtrr_get_type_by_name() below relies on.
 */
static const char *const mtrr_type_name[MTRR_TYPE_COUNT] = {
	"Uncacheable",
	"Combine",
	"2",
	"3",
	"Through",
	"Protect",
	"Back",
};

u64 mtrr_to_size(u64 mask)
{
	u64 size;

	size = ~mask & ((1ULL << cpu_phys_address_size()) - 1);
	size |= (1 << 12) - 1;
	size += 1;

	return size;
}

u64 mtrr_to_mask(u64 size)
{
	u64 mask;

	mask = ~(size - 1);
	mask &= (1ull << cpu_phys_address_size()) - 1;

	return mask;
}
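
/*
 * A worked example of the conversion, for a hypothetical CPU where
 * cpu_phys_address_size() returns 36:
 *
 *	mtrr_to_mask(0x100000)    = 0xffff00000	(a 1MiB region)
 *	mtrr_to_size(0xffff00000) = 0x100000
 *
 * The low 12 bits fall below the 4KiB MTRR granularity and are never
 * represented in the mask.
 */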

/* Prepare to adjust MTRRs */
void mtrr_open(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	if (do_caches) {
		state->enable_cache = dcache_status();

		if (state->enable_cache)
			disable_caches();
	}
	state->deftype = native_read_msr(MTRR_DEF_TYPE_MSR);
	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype & ~MTRR_DEF_TYPE_EN);
}

/* Clean up after adjusting MTRRs, and enable them */
void mtrr_close(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype | MTRR_DEF_TYPE_EN);
	if (do_caches && state->enable_cache)
		enable_caches();
}
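
/*
 * A minimal sketch of the open/close pattern these two functions support;
 * this mirrors what mtrr_commit() below does:
 *
 *	struct mtrr_state state;
 *
 *	mtrr_open(&state, true);	// caches off, MTRRs disabled
 *	// write MTRR_PHYS_BASE_MSR()/MTRR_PHYS_MASK_MSR() here
 *	mtrr_close(&state, true);	// MTRRs enabled, caches restored
 */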

static void set_var_mtrr(uint reg, uint type, uint64_t start, uint64_t size)
{
	u64 mask = mtrr_to_mask(size);

	wrmsrl(MTRR_PHYS_BASE_MSR(reg), start | type);
	wrmsrl(MTRR_PHYS_MASK_MSR(reg), mask | MTRR_PHYS_MASK_VALID);
}
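
/*
 * Layout of the MSR pair written above, per the Intel SDM (vol. 3):
 *
 *	PHYS_BASE: bits 12+: base address, bits 0-7: memory type
 *	PHYS_MASK: bits 12+: address mask, bit 11: valid flag
 */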

void mtrr_read_all(struct mtrr_info *info)
{
	int reg_count = mtrr_get_var_count();
	int i;

	for (i = 0; i < reg_count; i++) {
		info->mtrr[i].base = native_read_msr(MTRR_PHYS_BASE_MSR(i));
		info->mtrr[i].mask = native_read_msr(MTRR_PHYS_MASK_MSR(i));
	}
}

static void mtrr_write_all(struct mtrr_info *info)
{
	int reg_count = mtrr_get_var_count();
	struct mtrr_state state;
	int i;

	for (i = 0; i < reg_count; i++) {
		/* Caches are disabled only for the duration of each write */
		mtrr_open(&state, true);
		wrmsrl(MTRR_PHYS_BASE_MSR(i), info->mtrr[i].base);
		wrmsrl(MTRR_PHYS_MASK_MSR(i), info->mtrr[i].mask);
		mtrr_close(&state, true);
	}
}

static void write_mtrrs(void *arg)
{
	struct mtrr_info *info = arg;

	mtrr_write_all(info);
}

static void read_mtrrs(void *arg)
{
	struct mtrr_info *info = arg;

	mtrr_read_all(info);
}

/**
 * mtrr_copy_to_aps() - Copy the MTRRs from the boot CPU to other CPUs
 *
 * Return: 0 on success, -ve on failure
 */
static int mtrr_copy_to_aps(void)
{
	struct mtrr_info info;
	int ret;

	ret = mp_run_on_cpus(MP_SELECT_BSP, read_mtrrs, &info);
	if (ret == -ENXIO)
		return 0;
	else if (ret)
		return log_msg_ret("bsp", ret);

	ret = mp_run_on_cpus(MP_SELECT_APS, write_mtrrs, &info);
	if (ret)
		return log_msg_ret("aps", ret);

	return 0;
}

static int h_comp_mtrr(const void *p1, const void *p2)
{
	const struct mtrr_request *req1 = p1;
	const struct mtrr_request *req2 = p2;

	s64 diff = req1->start - req2->start;

	return diff < 0 ? -1 : diff > 0 ? 1 : 0;
}

int mtrr_commit(bool do_caches)
{
	struct mtrr_request *req = gd->arch.mtrr_req;
	struct mtrr_state state;
	int ret;
	int i;

	debug("%s: enabled=%d, count=%d\n", __func__, gd->arch.has_mtrr,
	      gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	debug("open\n");
	mtrr_open(&state, do_caches);
	debug("open done\n");
	qsort(req, gd->arch.mtrr_req_count, sizeof(*req), h_comp_mtrr);
	for (i = 0; i < gd->arch.mtrr_req_count; i++, req++)
		set_var_mtrr(i, req->type, req->start, req->size);

	/* Clear the ones that are unused */
	debug("clear\n");
	for (; i < mtrr_get_var_count(); i++)
		wrmsrl(MTRR_PHYS_MASK_MSR(i), 0);
	debug("close\n");
	mtrr_close(&state, do_caches);
	debug("mtrr done\n");

	if (gd->flags & GD_FLG_RELOC) {
		ret = mtrr_copy_to_aps();
		if (ret)
			return log_msg_ret("copy", ret);
	}

	return 0;
}
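
/*
 * A minimal usage sketch for the request interface, assuming a hypothetical
 * board with 2GiB of cacheable DRAM at address 0:
 *
 *	mtrr_add_request(MTRR_TYPE_WRBACK, 0, 0x80000000);
 *	mtrr_commit(true);
 *
 * Requests accumulate in gd->arch.mtrr_req; nothing touches the hardware
 * (or the APs, after relocation) until mtrr_commit() runs.
 */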

/* fms: find most significant bit set (from Linux) */
static inline uint fms(uint val)
{
	uint ret;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $0,%0\n"
		"1:" : "=r" (ret) : "mr" (val));

	return ret;
}

/*
 * fms64: find most significant bit set in a 64-bit word
 * For example, fms64(0x0) = 0, fms64(0x4400) = 14 and
 * fms64(0x40400000000) = 42.
 */
static uint fms64(uint64_t val)
{
	u32 hi = (u32)(val >> 32);

	if (!hi)
		return fms((u32)val);

	return fms(hi) + 32;
}

int mtrr_add_request(int type, u64 base, uint64_t size)
{
	struct mtrr_request *req;
	u64 mask;

	debug("%s: count=%d\n", __func__, gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	while (size) {
		uint addr_lsb;
		uint size_msb;
		u64 mtrr_size;

		addr_lsb = fls64(base);
		size_msb = fms64(size);

		/*
		 * Each MTRR entry must cover a power-of-two size and have
		 * its base aligned to that size. The largest usable chunk
		 * is therefore limited by both the lowest set bit of the
		 * base (its alignment) and the highest set bit of the
		 * remaining size. Algorithm is from coreboot.
		 */
		if (!addr_lsb || addr_lsb > size_msb)
			mtrr_size = 1ull << size_msb;
		else
			mtrr_size = 1ull << addr_lsb;
		log_debug("addr_lsb %x size_msb %x mtrr_size %llx\n",
			  addr_lsb, size_msb, mtrr_size);

		if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
			return -ENOSPC;
		req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
		req->type = type;
		req->start = base;
		req->size = mtrr_size;
		log_debug("%d: type=%d, %08llx  %08llx ",
			  gd->arch.mtrr_req_count - 1, req->type, req->start,
			  req->size);
		mask = mtrr_to_mask(req->size);
		mask |= MTRR_PHYS_MASK_VALID;
		log_debug("   %016llx %016llx\n", req->start | req->type, mask);

		size -= mtrr_size;
		base += mtrr_size;
	}

	return 0;
}
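
/*
 * Splitting example for the loop above: a 0x30000000 (768MiB) request
 * cannot be covered by a single power-of-two MTRR, so it is queued as two
 * chunks, e.g. 0x20000000 + 0x10000000 (the exact split also depends on
 * the alignment of the base address).
 */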

int mtrr_get_var_count(void)
{
	return msr_read(MSR_MTRR_CAP_MSR).lo & MSR_MTRR_CAP_VCNT;
}

static int get_free_var_mtrr(void)
{
	struct msr_t maskm;
	int vcnt;
	int i;

	vcnt = mtrr_get_var_count();

	/* Identify the first variable MTRR which is not valid */
	for (i = 0; i < vcnt; i++) {
		maskm = msr_read(MTRR_PHYS_MASK_MSR(i));
		if ((maskm.lo & MTRR_PHYS_MASK_VALID) == 0)
			return i;
	}

	/* No free variable MTRR */
	return -ENOSPC;
}

int mtrr_set_next_var(uint type, uint64_t start, uint64_t size)
{
	int mtrr;

	if (!is_power_of_2(size))
		return -EINVAL;

	mtrr = get_free_var_mtrr();
	if (mtrr < 0)
		return mtrr;

	set_var_mtrr(mtrr, type, start, size);
	debug("MTRR %x: start=%x, size=%x\n", mtrr, (uint)start, (uint)size);

	return 0;
}
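
/*
 * Illustrative call, e.g. to map a hypothetical 16MiB frame buffer as
 * write-combining using the first free variable MTRR:
 *
 *	mtrr_set_next_var(MTRR_TYPE_WRCOMB, fb_base, 0x1000000);
 *
 * The size must be a power of two, and the base should be aligned to it.
 */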

/** enum mtrr_opcode - supported operations for mtrr_do_oper() */
enum mtrr_opcode {
	MTRR_OP_SET,
	MTRR_OP_SET_VALID,
};

/**
 * struct mtrr_oper - An MTRR operation to perform on a CPU
 *
 * @opcode: Indicates operation to perform
 * @reg: MTRR reg number to select (0-7, -1 = all)
 * @valid: Valid value to write for MTRR_OP_SET_VALID
 * @base: Base value to write for MTRR_OP_SET
 * @mask: Mask value to write for MTRR_OP_SET
 */
struct mtrr_oper {
	enum mtrr_opcode opcode;
	int reg;
	bool valid;
	u64 base;
	u64 mask;
};

static void mtrr_do_oper(void *arg)
{
	struct mtrr_oper *oper = arg;
	u64 mask;

	switch (oper->opcode) {
	case MTRR_OP_SET_VALID:
		mask = native_read_msr(MTRR_PHYS_MASK_MSR(oper->reg));
		if (oper->valid)
			mask |= MTRR_PHYS_MASK_VALID;
		else
			mask &= ~MTRR_PHYS_MASK_VALID;
		wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), mask);
		break;
	case MTRR_OP_SET:
		wrmsrl(MTRR_PHYS_BASE_MSR(oper->reg), oper->base);
		wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), oper->mask);
		break;
	}
}

static int mtrr_start_op(int cpu_select, struct mtrr_oper *oper)
{
	struct mtrr_state state;
	int ret;

	mtrr_open(&state, true);
	ret = mp_run_on_cpus(cpu_select, mtrr_do_oper, oper);
	mtrr_close(&state, true);
	if (ret)
		return log_msg_ret("run", ret);

	return 0;
}

int mtrr_set_valid(int cpu_select, int reg, bool valid)
{
	struct mtrr_oper oper;

	oper.opcode = MTRR_OP_SET_VALID;
	oper.reg = reg;
	oper.valid = valid;

	return mtrr_start_op(cpu_select, &oper);
}

int mtrr_set(int cpu_select, int reg, u64 base, u64 mask)
{
	struct mtrr_oper oper;

	oper.opcode = MTRR_OP_SET;
	oper.reg = reg;
	oper.base = base;
	oper.mask = mask;

	return mtrr_start_op(cpu_select, &oper);
}
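
/*
 * These helpers run on whichever CPUs cpu_select names (see asm/mp.h).
 * For example, to mark variable MTRR 3 invalid on all CPUs, a caller
 * could use:
 *
 *	mtrr_set_valid(MP_SELECT_ALL, 3, false);
 */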

/* Wrapper to let mtrr_read_all() be used as an mp_run_on_cpus() callback */
static void read_mtrrs_(void *arg)
{
	struct mtrr_info *info = arg;

	mtrr_read_all(info);
}

int mtrr_list(int reg_count, int cpu_select)
{
	struct mtrr_info info;
	int ret;
	int i;

	printf("Reg Valid Write-type   %-16s %-16s %-16s\n", "Base   ||",
	       "Mask   ||", "Size   ||");
	memset(&info, '\0', sizeof(info));
	ret = mp_run_on_cpus(cpu_select, read_mtrrs_, &info);
	if (ret)
		return log_msg_ret("run", ret);
	for (i = 0; i < reg_count; i++) {
		const char *type = "Invalid";
		u64 base, mask, size;
		bool valid;

		base = info.mtrr[i].base;
		mask = info.mtrr[i].mask;
		size = mtrr_to_size(mask);
		valid = mask & MTRR_PHYS_MASK_VALID;
		type = mtrr_type_name[base & MTRR_BASE_TYPE_MASK];
		printf("%d   %-5s %-12s %016llx %016llx %016llx\n", i,
		       valid ? "Y" : "N", type, base & ~MTRR_BASE_TYPE_MASK,
		       mask & ~MTRR_PHYS_MASK_VALID, size);
	}

	return 0;
}
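
/*
 * Illustrative output for a single valid write-back MTRR covering 2GiB at
 * address 0 on a CPU with 36 address bits (values are hypothetical):
 *
 *	Reg Valid Write-type   Base   ||        Mask   ||        Size   ||
 *	0   Y     Back         0000000000000000 0000000f80000000 0000000080000000
 */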

int mtrr_get_type_by_name(const char *typename)
{
	int i;

	for (i = 0; i < MTRR_TYPE_COUNT; i++) {
		/*
		 * Matching the first character is enough: each name in
		 * mtrr_type_name[] starts with a distinct letter
		 */
		if (*typename == *mtrr_type_name[i])
			return i;
	}

	return -EINVAL;
}