1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * (C) Copyright 2008-2011
4  * Graeme Russ, <graeme.russ@gmail.com>
5  *
6  * (C) Copyright 2002
7  * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
8  *
9  * (C) Copyright 2002
10  * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
11  * Marius Groeger <mgroeger@sysgo.de>
12  *
13  * (C) Copyright 2002
14  * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
15  * Alex Zuepke <azu@sysgo.de>
16  *
17  * Part of this file is adapted from coreboot
18  * src/arch/x86/lib/cpu.c
19  */
20 
21 #define LOG_CATEGORY	UCLASS_CPU
22 
23 #include <bootstage.h>
24 #include <command.h>
25 #include <cpu_func.h>
26 #include <dm.h>
27 #include <errno.h>
28 #include <event.h>
29 #include <init.h>
30 #include <irq.h>
31 #include <log.h>
32 #include <malloc.h>
33 #include <syscon.h>
34 #include <acpi/acpi_s3.h>
35 #include <acpi/acpi_table.h>
36 #include <asm/acpi.h>
37 #include <asm/control_regs.h>
38 #include <asm/coreboot_tables.h>
39 #include <asm/cpu.h>
40 #include <asm/global_data.h>
41 #include <asm/lapic.h>
42 #include <asm/microcode.h>
43 #include <asm/mp.h>
44 #include <asm/mrccache.h>
45 #include <asm/msr.h>
46 #include <asm/mtrr.h>
47 #include <asm/post.h>
48 #include <asm/processor.h>
49 #include <asm/processor-flags.h>
50 #include <asm/interrupt.h>
51 #include <asm/tables.h>
52 #include <linux/compiler.h>
53 
54 DECLARE_GLOBAL_DATA_PTR;
55 
#ifndef CONFIG_TPL_BUILD
/* Printable CPU vendor names, indexed by the X86_VENDOR_... values */
static const char *const x86_vendor_name[] = {
	[X86_VENDOR_INTEL]     = "Intel",
	[X86_VENDOR_CYRIX]     = "Cyrix",
	[X86_VENDOR_AMD]       = "AMD",
	[X86_VENDOR_UMC]       = "UMC",
	[X86_VENDOR_NEXGEN]    = "NexGen",
	[X86_VENDOR_CENTAUR]   = "Centaur",
	[X86_VENDOR_RISE]      = "Rise",
	[X86_VENDOR_TRANSMETA] = "Transmeta",
	[X86_VENDOR_NSC]       = "NSC",
	[X86_VENDOR_SIS]       = "SiS",
};
#endif
70 
x86_cleanup_before_linux(void)71 int __weak x86_cleanup_before_linux(void)
72 {
73 	int ret;
74 
75 	ret = mp_park_aps();
76 	if (ret)
77 		return log_msg_ret("park", ret);
78 	bootstage_stash_default();
79 
80 	return 0;
81 }
82 
/* Turn on the CPU caches; init_cache() resolves here unless a board
 * provides its own (strong) definition via the weak alias below.
 */
int x86_init_cache(void)
{
	enable_caches();

	return 0;
}
int init_cache(void) __attribute__((weak, alias("x86_init_cache")));
90 
/* Write back and invalidate all CPU caches; the range arguments are
 * ignored since wbinvd always operates on the whole cache hierarchy
 */
void  flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	asm("wbinvd\n");
}
95 
/* Define these functions to allow ehci-hcd to function */
/* No-op: x86 DMA is presumed cache-coherent here, so there is nothing to
 * flush for a given range — TODO confirm for non-coherent devices
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
100 
/* No-op counterpart of flush_dcache_range() for cache-coherent x86 DMA */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}
104 
/* Enable the data cache (caches are enabled/disabled as a unit on x86) */
void dcache_enable(void)
{
	enable_caches();
}
109 
/* Disable the data cache (caches are enabled/disabled as a unit on x86) */
void dcache_disable(void)
{
	disable_caches();
}
114 
/* No-op: x86 provides no separate control for the instruction cache */
void icache_enable(void)
{
}
118 
/* No-op: x86 provides no separate control for the instruction cache */
void icache_disable(void)
{
}
122 
/* Report the instruction cache as always enabled (see icache_enable()) */
int icache_status(void)
{
	return 1;
}
127 
128 #ifndef CONFIG_TPL_BUILD
cpu_vendor_name(int vendor)129 const char *cpu_vendor_name(int vendor)
130 {
131 	const char *name;
132 	name = "<invalid cpu vendor>";
133 	if (vendor < ARRAY_SIZE(x86_vendor_name) &&
134 	    x86_vendor_name[vendor])
135 		name = x86_vendor_name[vendor];
136 
137 	return name;
138 }
139 #endif
140 
/**
 * cpu_get_name() - Obtain the processor brand string via CPUID
 *
 * Fills @name with the 48-byte brand string from CPUID leaves
 * 0x80000002..0x80000004 (16 bytes per leaf, in EAX/EBX/ECX/EDX order).
 *
 * @name: Buffer of at least CPU_MAX_NAME_LEN bytes; assumed suitably
 *	  aligned for the 32-bit stores below — TODO confirm at call sites
 * Return: pointer into @name past any leading spaces
 */
char *cpu_get_name(char *name)
{
	/* Store each CPUID register straight into the buffer, 4 bytes at a time */
	unsigned int *name_as_ints = (unsigned int *)name;
	struct cpuid_result regs;
	char *ptr;
	int i;

	/* This bit adds up to 48 bytes */
	for (i = 0; i < 3; i++) {
		regs = cpuid(0x80000002 + i);
		name_as_ints[i * 4 + 0] = regs.eax;
		name_as_ints[i * 4 + 1] = regs.ebx;
		name_as_ints[i * 4 + 2] = regs.ecx;
		name_as_ints[i * 4 + 3] = regs.edx;
	}
	/* Guarantee termination even if the CPU returned all 48 bytes */
	name[CPU_MAX_NAME_LEN - 1] = '\0';

	/* Skip leading spaces. */
	ptr = name;
	while (*ptr == ' ')
		ptr++;

	return ptr;
}
165 
#if !CONFIG_IS_ENABLED(CPU)
/*
 * Print basic CPU information (mode, vendor, device ID) to the console.
 * Only used when the CPU uclass driver is not enabled, which otherwise
 * provides this output.
 */
int print_cpuinfo(void)
{
	post_code(POST_CPU_INFO);

	printf("CPU: %s, vendor %s, device %xh\n",
	       cpu_has_64bit() ? "x86_64" : "x86",
	       cpu_vendor_name(gd->arch.x86_vendor), gd->arch.x86_device);

	if (IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)) {
		debug("ACPI previous sleep state: %s\n",
		      acpi_ss_string(gd->arch.prev_sleep_state));
	}

	return 0;
}
#endif
183 
#if CONFIG_IS_ENABLED(SHOW_BOOT_PROGRESS)
/* Emit the boot-progress value to the POST-code I/O port for POST cards */
void show_boot_progress(int val)
{
	outb(val, POST_PORT);
}
#endif
190 
191 #if !defined(CONFIG_SYS_COREBOOT) && !defined(CONFIG_EFI_STUB) && \
192 	!defined(CONFIG_XPL_BUILD)
193 /*
194  * Implement a weak default function for boards that need to do some final init
195  * before the system is ready.
196  */
__weak void board_final_init(void)
{
	/* no-op by default; boards provide a strong definition if needed */
}
200 
201 /*
202  * Implement a weak default function for boards that need to do some final
203  * processing before booting the OS.
204  */
__weak void board_final_cleanup(void)
{
	/* no-op by default; boards provide a strong definition if needed */
}
208 
/**
 * last_stage_init() - Final x86 init, run via the EVT_LAST_STAGE_INIT event
 *
 * Runs board final init, handles an ACPI S3 resume (which does not return
 * if taken), writes the configuration tables and performs board cleanup.
 *
 * Return: 0 on success, or a logged error if table writing failed
 */
static int last_stage_init(void)
{
	struct acpi_fadt __maybe_unused *fadt;
	int ret;

	board_final_init();

	if (IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)) {
		fadt = acpi_find_fadt();

		/* Resume from S3 if that is where we came from */
		if (fadt && gd->arch.prev_sleep_state == ACPI_S3)
			acpi_resume(fadt);
	}

	ret = write_tables();
	if (ret) {
		log_err("Failed to write tables\n");
		return log_msg_ret("table", ret);
	}

	if (IS_ENABLED(CONFIG_GENERATE_ACPI_TABLE)) {
		fadt = acpi_find_fadt();

		/* Don't touch ACPI hardware on HW reduced platforms */
		if (fadt && !(fadt->flags & ACPI_FADT_HW_REDUCED_ACPI)) {
			/*
			 * Rather than waiting for OSPM to request us to
			 * switch to ACPI mode, do it ourselves, since SMI
			 * will not be triggered.
			 */
			enter_acpi_mode(fadt->pm1a_cnt_blk);
		}
	}

	/*
	 * TODO(sjg@chromium.org): Move this to bootm_announce_and_cleanup()
	 * once APL FSP-S at 0x200000 does not overlap with the bzimage at
	 * 0x100000.
	 */
	board_final_cleanup();

	return 0;
}
EVENT_SPY_SIMPLE(EVT_LAST_STAGE_INIT, last_stage_init);
253 
254 #endif  /* !SYS_COREBOOT && !EFI_STUB && !XPL_BUILD */
255 
x86_init_cpus(void)256 static int x86_init_cpus(void)
257 {
258 	if (IS_ENABLED(CONFIG_SMP)) {
259 		debug("Init additional CPUs\n");
260 		x86_mp_init();
261 	} else {
262 		struct udevice *dev;
263 
264 		/*
265 		 * This causes the cpu-x86 driver to be probed.
266 		 * We don't check return value here as we want to allow boards
267 		 * which have not been converted to use cpu uclass driver to
268 		 * boot.
269 		 */
270 		uclass_first_device(UCLASS_CPU, &dev);
271 	}
272 
273 	return 0;
274 }
275 
/**
 * cpu_init_r() - Post-relocation CPU init
 *
 * Starts the APs (or probes the boot CPU), then probes the northbridge,
 * PCH, LPC and pin-control devices where available. When U-Boot is not
 * the bare-metal boot loader (ll_boot_init() false), only PCI is probed.
 *
 * Return: 0 on success, or an error from x86_init_cpus()
 */
int cpu_init_r(void)
{
	struct udevice *dev;
	int ret;

	if (!ll_boot_init()) {
		uclass_first_device(UCLASS_PCI, &dev);
		return 0;
	}

	ret = x86_init_cpus();
	if (ret)
		return ret;

	/*
	 * Set up the northbridge, PCH and LPC if available. Note that these
	 * may have had some limited pre-relocation init if they were probed
	 * before relocation, but this is post relocation.
	 */
	uclass_first_device(UCLASS_NORTHBRIDGE, &dev);
	uclass_first_device(UCLASS_PCH, &dev);
	uclass_first_device(UCLASS_LPC, &dev);

	/* Set up pin control if available */
	ret = syscon_get_by_driver_data(X86_SYSCON_PINCONF, &dev);
	debug("%s, pinctrl=%p, ret=%d\n", __func__, dev, ret);

	return 0;
}
305 
#ifndef CONFIG_EFI_STUB
/**
 * reserve_arch() - Reserve x86-specific memory regions before relocation
 *
 * Reserves space for the MRC cache, SeaBIOS high tables and ACPI S3 data
 * as configured, and snapshots GPIO IRQ polarities before FSP-S runs.
 *
 * Return: 0 (reservation helpers handle their own failures)
 */
int reserve_arch(void)
{
	struct udevice *itss;
	int ret;

	if (IS_ENABLED(CONFIG_ENABLE_MRC_CACHE))
		mrccache_reserve();

	if (IS_ENABLED(CONFIG_SEABIOS))
		high_table_reserve();

	if (IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)) {
		acpi_s3_reserve();

		if (IS_ENABLED(CONFIG_HAVE_FSP)) {
			/*
			 * Save stack address to CMOS so that at next S3 boot,
			 * we can use it as the stack address for fsp_continue()
			 */
			fsp_save_s3_stack();
		}
	}
	ret = irq_first_device_type(X86_IRQT_ITSS, &itss);
	if (!ret) {
		/*
		 * Snapshot the current GPIO IRQ polarities. FSP-S is about to
		 * run and will set a default policy that doesn't honour boards'
		 * requirements
		 */
		irq_snapshot_polarities(itss);
	}

	return 0;
}
#endif
342 
/**
 * detect_coreboot_table_at() - Scan a memory window for a coreboot table
 *
 * Searches for the "LBIO" signature that begins a coreboot table header.
 * The scan advances four u32 words (16 bytes) per iteration, matching the
 * presumed 16-byte alignment of the header — TODO confirm against the
 * coreboot table spec.
 *
 * @start: Physical address at which to start scanning
 * @size: Number of bytes to scan
 * Return: address of the table if found, else -ENOENT
 */
static long detect_coreboot_table_at(ulong start, ulong size)
{
	u32 *ptr, *end;

	size /= 4;	/* convert byte count to u32-word count */
	for (ptr = (void *)start, end = ptr + size; ptr < end; ptr += 4) {
		if (*ptr == 0x4f49424c) /* "LBIO" (little-endian) */
			return (long)ptr;
	}

	return -ENOENT;
}
355 
/**
 * locate_coreboot_table() - Find the coreboot table in memory
 *
 * Return: address of the "LBIO" table header, or -ENOENT if not present
 */
long locate_coreboot_table(void)
{
	/* We look for LBIO from addresses 1K-4K and again at 960KB */
	long found = detect_coreboot_table_at(0x400, 0xc00);

	if (found < 0)
		found = detect_coreboot_table_at(0xf0000, 0x1000);

	return found;
}
367 
/* Return true if the CPUID instruction is available (EFLAGS.ID can be
 * toggled)
 */
static bool has_cpuid(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
372 
/* Return the highest supported extended CPUID leaf (CPUID 0x80000000 EAX) */
static uint cpu_cpuid_extended_level(void)
{
	return cpuid_eax(0x80000000);
}
377 
/**
 * cpu_phys_address_size() - Get the number of physical address bits
 *
 * Return: 32 if CPUID is unavailable; otherwise MAXPHYADDR from CPUID leaf
 * 0x80000008 (EAX[7:0]) when that leaf exists; otherwise 36 if the CPU
 * advertises PAE or PSE36 in CPUID.1:EDX, else 32
 */
int cpu_phys_address_size(void)
{
	if (!has_cpuid())
		return 32;

	if (cpu_cpuid_extended_level() >= 0x80000008)
		return cpuid_eax(0x80000008) & 0xff;

	if (cpuid_edx(1) & (CPUID_FEATURE_PAE | CPUID_FEATURE_PSE36))
		return 36;

	return 32;
}
391