// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic Intel ACPI table generation
 *
 * Copyright (C) 2017 Intel Corp.
 * Copyright 2019 Google LLC
 *
 * Modified from coreboot src/soc/intel/common/block/acpi.c
 */

#include <common.h>
#include <bloblist.h>
#include <cpu.h>
#include <dm.h>
#include <acpi/acpigen.h>
#include <asm/acpigen.h>
#include <asm/acpi_table.h>
#include <asm/cpu.h>
#include <asm/cpu_common.h>
#include <asm/global_data.h>
#include <asm/intel_acpi.h>
#include <asm/ioapic.h>
#include <asm/mpspec.h>
#include <asm/smm.h>
#include <asm/turbo.h>
#include <asm/intel_gnvs.h>
#include <asm/arch/iomap.h>
#include <asm/arch/pm.h>
#include <asm/arch/systemagent.h>
#include <dm/acpi.h>
#include <linux/err.h>
#include <power/acpi_pmc.h>

int acpi_fill_mcfg(struct acpi_ctx *ctx)
{
	size_t size;

	/* PCI Segment Group 0, Start Bus Number 0, End Bus Number is 255 */
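	/* Each bus takes 1MiB of ECAM space, hence the length >> 20 below */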
	size = acpi_create_mcfg_mmconfig((void *)ctx->current,
					 CONFIG_MMCONF_BASE_ADDRESS, 0, 0,
					 (CONFIG_SA_PCIEX_LENGTH >> 20) - 1);
	acpi_inc(ctx, size);

	return 0;
}

static int acpi_sci_irq(void)
{
	int sci_irq = 9;
	uint scis;
	int ret;

	ret = arch_read_sci_irq_select();
	if (IS_ERR_VALUE(ret))
		return log_msg_ret("sci_irq", ret);
	scis = ret;
	scis &= SCI_IRQ_MASK;
	scis >>= SCI_IRQ_SHIFT;

	/* Determine how SCI is routed. */
	switch (scis) {
	case SCIS_IRQ9:
	case SCIS_IRQ10:
	case SCIS_IRQ11:
		sci_irq = scis - SCIS_IRQ9 + 9;
		break;
	case SCIS_IRQ20:
	case SCIS_IRQ21:
	case SCIS_IRQ22:
	case SCIS_IRQ23:
		sci_irq = scis - SCIS_IRQ20 + 20;
		break;
	default:
		log_warning("Invalid SCI route! Defaulting to IRQ9\n");
		sci_irq = 9;
		break;
	}

	log_debug("SCI is IRQ%d\n", sci_irq);

	return sci_irq;
}

static unsigned long acpi_madt_irq_overrides(unsigned long current)
{
	int sci = acpi_sci_irq();
	u16 flags = MP_IRQ_TRIGGER_LEVEL;

	if (sci < 0)
		return log_msg_ret("sci irq", sci);

	/* INT_SRC_OVR */
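	/* Remap legacy IRQ0 (the PIT timer) to GSI 2 on the I/O APIC */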
	current += acpi_create_madt_irqoverride((void *)current, 0, 0, 2, 0);

	flags |= arch_madt_sci_irq_polarity(sci);

	/* SCI */
	current +=
	    acpi_create_madt_irqoverride((void *)current, 0, sci, sci, flags);

	return current;
}

u32 acpi_fill_madt(u32 current)
{
	/* Local APICs */
	current += acpi_create_madt_lapics(current);

	/* IOAPIC */
	current += acpi_create_madt_ioapic((void *)current, 2, IO_APIC_ADDR, 0);

	return acpi_madt_irq_overrides(current);
}

void intel_acpi_fill_fadt(struct acpi_fadt *fadt)
{
	const u16 pmbase = IOMAP_ACPI_BASE;

	/* Use ACPI 3.0 revision. */
	fadt->header.revision = acpi_get_table_revision(ACPITAB_FADT);

	fadt->sci_int = acpi_sci_irq();
	fadt->smi_cmd = APM_CNT;
	fadt->acpi_enable = APM_CNT_ACPI_ENABLE;
	fadt->acpi_disable = APM_CNT_ACPI_DISABLE;
	fadt->s4bios_req = 0x0;
	fadt->pstate_cnt = 0;

	fadt->pm1a_evt_blk = pmbase + PM1_STS;
	fadt->pm1b_evt_blk = 0x0;
	fadt->pm1a_cnt_blk = pmbase + PM1_CNT;
	fadt->pm1b_cnt_blk = 0x0;

	fadt->gpe0_blk = pmbase + GPE0_STS;

	fadt->pm1_evt_len = 4;
	fadt->pm1_cnt_len = 2;

	/* GPE0 STS/EN pairs each 32 bits wide. */
	fadt->gpe0_blk_len = 2 * GPE0_REG_MAX * sizeof(uint32_t);

	fadt->flush_size = 0x400;	/* twice the cache size */
	fadt->flush_stride = 0x10;	/* cache line width */
	fadt->duty_offset = 1;
	fadt->day_alrm = 0xd;

	fadt->flags = ACPI_FADT_WBINVD | ACPI_FADT_C1_SUPPORTED |
	    ACPI_FADT_C2_MP_SUPPORTED | ACPI_FADT_SLEEP_BUTTON |
	    ACPI_FADT_RESET_REGISTER | ACPI_FADT_SEALED_CASE |
	    ACPI_FADT_S4_RTC_WAKE | ACPI_FADT_PLATFORM_CLOCK;

	fadt->reset_reg.space_id = 1;
	fadt->reset_reg.bit_width = 8;
	fadt->reset_reg.addrl = IO_PORT_RESET;
	fadt->reset_value = RST_CPU | SYS_RST;

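	/* The extended (X_) register blocks use space_id 1, i.e. system I/O */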
	fadt->x_pm1a_evt_blk.space_id = 1;
	fadt->x_pm1a_evt_blk.bit_width = fadt->pm1_evt_len * 8;
	fadt->x_pm1a_evt_blk.addrl = pmbase + PM1_STS;

	fadt->x_pm1b_evt_blk.space_id = 1;

	fadt->x_pm1a_cnt_blk.space_id = 1;
	fadt->x_pm1a_cnt_blk.bit_width = fadt->pm1_cnt_len * 8;
	fadt->x_pm1a_cnt_blk.addrl = pmbase + PM1_CNT;

	fadt->x_pm1b_cnt_blk.space_id = 1;

	fadt->x_gpe1_blk.space_id = 1;
}

int intel_southbridge_write_acpi_tables(const struct udevice *dev,
					struct acpi_ctx *ctx)
{
	int ret;

	ret = acpi_write_dbg2_pci_uart(ctx, gd->cur_serial_dev,
				       ACPI_ACCESS_SIZE_DWORD_ACCESS);
	if (ret)
		return log_msg_ret("dbg2", ret);

	ret = acpi_write_hpet(ctx);
	if (ret)
		return log_msg_ret("hpet", ret);

	return 0;
}

__weak u32 acpi_fill_soc_wake(u32 generic_pm1_en,
			      const struct chipset_power_state *ps)
{
	return generic_pm1_en;
}

__weak int acpi_create_gnvs(struct acpi_global_nvs *gnvs)
{
	return 0;
}

int southbridge_inject_dsdt(const struct udevice *dev, struct acpi_ctx *ctx)
{
	struct acpi_global_nvs *gnvs;
	int ret;

	ret = bloblist_ensure_size(BLOBLISTT_ACPI_GNVS, sizeof(*gnvs), 0,
				   (void **)&gnvs);
	if (ret)
		return log_msg_ret("bloblist", ret);

	ret = acpi_create_gnvs(gnvs);
	if (ret)
		return log_msg_ret("gnvs", ret);

	/*
	 * TODO(sjg@chromium.org): tell SMI about it
	 * smm_setup_structures(gnvs, NULL, NULL);
	 */

	/* Add it to DSDT */
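	/* Publish the GNVS address under \NVSA so ASL code can locate the table */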
	acpigen_write_scope(ctx, "\\");
	acpigen_write_name_dword(ctx, "NVSA", (uintptr_t)gnvs);
	acpigen_pop_len(ctx);

	return 0;
}

static int calculate_power(int tdp, int p1_ratio, int ratio)
{
	u32 m;
	u32 power;

	/*
	 * M = ((1.1 - ((p1_ratio - ratio) * 0.00625)) / 1.1) ^ 2
	 *
	 * Power = (ratio / p1_ratio) * m * tdp
	 */

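	/*
	 * Fixed-point evaluation: the first step computes the bracketed
	 * fraction scaled by 10^4; squaring and dividing by 1000 leaves
	 * m = fraction^2 * 10^5, and the remaining scaling steps return
	 * the result in mW (tdp is passed in mW).
	 */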
	m = (110000 - ((p1_ratio - ratio) * 625)) / 11;
	m = (m * m) / 1000;

	power = ((ratio * 100000 / p1_ratio) / 100);
	power *= (m / 100) * (tdp / 1000);
	power /= 1000;

	return power;
}

void generate_p_state_entries(struct acpi_ctx *ctx, int core,
			      int cores_per_package)
{
	int ratio_min, ratio_max, ratio_turbo, ratio_step;
	int coord_type, power_max, num_entries;
	int ratio, power, clock, clock_max;
	bool turbo;

	coord_type = cpu_get_coord_type();
	ratio_min = cpu_get_min_ratio();
	ratio_max = cpu_get_max_ratio();
	clock_max = (ratio_max * cpu_get_bus_clock_khz()) / 1000;
	turbo = (turbo_get_state() == TURBO_ENABLED);

	/* Calculate CPU TDP in mW */
	power_max = cpu_get_power_max();

	/* Write _PCT indicating use of FFixedHW */
	acpigen_write_empty_pct(ctx);

	/* Write _PPC with no limit on supported P-state */
	acpigen_write_ppc_nvs(ctx);
	/* Write PSD indicating configured coordination type */
	acpigen_write_psd_package(ctx, core, 1, coord_type);

	/* Add P-state entries in _PSS table */
	acpigen_write_name(ctx, "_PSS");

	/* Determine ratio points */
	ratio_step = PSS_RATIO_STEP;
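	/* Widen the step until the resulting table fits in PSS_MAX_ENTRIES */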
	do {
		num_entries = ((ratio_max - ratio_min) / ratio_step) + 1;
		if (((ratio_max - ratio_min) % ratio_step) > 0)
			num_entries += 1;
		if (turbo)
			num_entries += 1;
		if (num_entries > PSS_MAX_ENTRIES)
			ratio_step += 1;
	} while (num_entries > PSS_MAX_ENTRIES);

	/* _PSS package count depends on Turbo */
	acpigen_write_package(ctx, num_entries);

	/* P[T] is Turbo state if enabled */
	if (turbo) {
		ratio_turbo = cpu_get_max_turbo_ratio();

		/* Add entry for Turbo ratio */
		acpigen_write_pss_package(ctx, clock_max + 1,	/* MHz */
					  power_max,		/* mW */
					  PSS_LATENCY_TRANSITION,/* lat1 */
					  PSS_LATENCY_BUSMASTER,/* lat2 */
					  ratio_turbo << 8,	/* control */
					  ratio_turbo << 8);	/* status */
		num_entries -= 1;
	}

	/* First regular entry is max non-turbo ratio */
	acpigen_write_pss_package(ctx, clock_max,	/* MHz */
				  power_max,		/* mW */
				  PSS_LATENCY_TRANSITION,/* lat1 */
				  PSS_LATENCY_BUSMASTER,/* lat2 */
				  ratio_max << 8,	/* control */
				  ratio_max << 8);	/* status */
	num_entries -= 1;

	/* Generate the remaining entries */
	for (ratio = ratio_min + ((num_entries - 1) * ratio_step);
	     ratio >= ratio_min; ratio -= ratio_step) {
		/* Calculate power at this ratio */
		power = calculate_power(power_max, ratio_max, ratio);
		clock = (ratio * cpu_get_bus_clock_khz()) / 1000;

		acpigen_write_pss_package(ctx, clock,		/* MHz */
					  power,		/* mW */
					  PSS_LATENCY_TRANSITION,/* lat1 */
					  PSS_LATENCY_BUSMASTER,/* lat2 */
					  ratio << 8,		/* control */
					  ratio << 8);		/* status */
	}
	/* Fix package length */
	acpigen_pop_len(ctx);
}

void generate_t_state_entries(struct acpi_ctx *ctx, int core,
			      int cores_per_package, struct acpi_tstate *entry,
			      int nentries)
{
	if (!nentries)
		return;

	/* Indicate SW_ALL coordination for T-states */
	acpigen_write_tsd_package(ctx, core, cores_per_package, SW_ALL);

	/* Indicate FixedHW so OS will use MSR */
	acpigen_write_empty_ptc(ctx);

	/* Set NVS controlled T-state limit */
	acpigen_write_tpc(ctx, "\\TLVL");

	/* Write TSS table for MSR access */
	acpigen_write_tss_package(ctx, entry, nentries);
}

int acpi_generate_cpu_header(struct acpi_ctx *ctx, int core_id,
			     const struct acpi_cstate *c_state_map,
			     int num_cstates)
{
	bool is_first = !core_id;

	/* Generate processor \_PR.CPUx */
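	/* Only the first core declares a P_BLK (at ACPI_BASE_ADDRESS, length 6) */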
	acpigen_write_processor(ctx, core_id, is_first ? ACPI_BASE_ADDRESS : 0,
				is_first ? 6 : 0);

	/* Generate C-state tables */
	acpigen_write_cst_package(ctx, c_state_map, num_cstates);

	return 0;
}

int acpi_generate_cpu_package_final(struct acpi_ctx *ctx, int cores_per_package)
{
	/*
	 * PPKG is usually used for thermal management of the first and only
	 * package
	 */
	acpigen_write_processor_package(ctx, "PPKG", 0, cores_per_package);

	/* Add a method to notify processor nodes */
	acpigen_write_processor_cnot(ctx, cores_per_package);

	return 0;
}