1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Routines to identify caches on Intel CPU.
4 *
5 * Changes:
6 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
7 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
8 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
9 */
10
11 #include <linux/slab.h>
12 #include <linux/cacheinfo.h>
13 #include <linux/cpu.h>
14 #include <linux/cpuhotplug.h>
15 #include <linux/sched.h>
16 #include <linux/capability.h>
17 #include <linux/sysfs.h>
18 #include <linux/pci.h>
19 #include <linux/stop_machine.h>
20
21 #include <asm/cpufeature.h>
22 #include <asm/cacheinfo.h>
23 #include <asm/amd_nb.h>
24 #include <asm/smp.h>
25 #include <asm/mtrr.h>
26 #include <asm/tlbflush.h>
27
28 #include "cpu.h"
29
30 #define LVL_1_INST 1
31 #define LVL_1_DATA 2
32 #define LVL_2 3
33 #define LVL_3 4
34 #define LVL_TRACE 5
35
36 /* Shared last level cache maps */
37 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
38
39 /* Shared L2 cache maps */
40 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
41
42 /* Kernel controls MTRR and/or PAT MSRs. */
43 unsigned int memory_caching_control __ro_after_init;
44
45 struct _cache_table {
46 unsigned char descriptor;
47 char cache_type;
48 short size;
49 };
50
51 #define MB(x) ((x) * 1024)
52
53 /* All the cache descriptor types we care about (no TLB entries;
54 the trace cache descriptors are kept only for the P4 trace cache) */
55
56 static const struct _cache_table cache_table[] =
57 {
58 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
59 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
60 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
61 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
62 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
63 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
64 { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
65 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
66 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
67 { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
68 { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */
69 { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */
70 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
71 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
72 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
73 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
74 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
75 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
76 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
77 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
78 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
79 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
80 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
81 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
82 { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */
83 { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
84 { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
85 { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
86 { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
87 { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
88 { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
89 { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
90 { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */
91 { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */
92 { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */
93 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
94 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
95 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
96 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
97 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
98 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
99 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
100 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
101 { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
102 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
103 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
104 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
105 { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
106 { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
107 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
108 { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
109 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
110 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
111 { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
112 { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */
113 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
114 { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */
115 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
116 { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */
117 { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */
118 { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */
119 { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */
120 { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
121 { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */
122 { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
123 { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */
124 { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */
125 { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
126 { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
127 { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
128 { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
129 { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
130 { 0x00, 0, 0}
131 };
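/*
 * Example of how the table is used: a CPUID(2) descriptor byte of 0x2c
 * maps to { LVL_1_DATA, 32 } (a 32 KB L1 data cache) and 0x49 maps to
 * { LVL_3, MB(4) } (a 4 MB L3); init_intel_cacheinfo() below accumulates
 * these per-level sizes in its CPUID(2) path.
 */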
132
133
134 enum _cache_type {
135 CTYPE_NULL = 0,
136 CTYPE_DATA = 1,
137 CTYPE_INST = 2,
138 CTYPE_UNIFIED = 3
139 };
140
141 union _cpuid4_leaf_eax {
142 struct {
143 enum _cache_type type:5;
144 unsigned int level:3;
145 unsigned int is_self_initializing:1;
146 unsigned int is_fully_associative:1;
147 unsigned int reserved:4;
148 unsigned int num_threads_sharing:12;
149 unsigned int num_cores_on_die:6;
150 } split;
151 u32 full;
152 };
153
154 union _cpuid4_leaf_ebx {
155 struct {
156 unsigned int coherency_line_size:12;
157 unsigned int physical_line_partition:10;
158 unsigned int ways_of_associativity:10;
159 } split;
160 u32 full;
161 };
162
163 union _cpuid4_leaf_ecx {
164 struct {
165 unsigned int number_of_sets:32;
166 } split;
167 u32 full;
168 };
169
170 struct _cpuid4_info_regs {
171 union _cpuid4_leaf_eax eax;
172 union _cpuid4_leaf_ebx ebx;
173 union _cpuid4_leaf_ecx ecx;
174 unsigned int id;
175 unsigned long size;
176 struct amd_northbridge *nb;
177 };
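/*
 * Example decode (hypothetical value): eax.full == 0x1c004121 splits via
 * the bitfields above into type == 1 (data), level == 1,
 * is_self_initializing == 1, num_threads_sharing == 1 (two threads share
 * the cache) and num_cores_on_die == 7 (eight cores).
 */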
178
179 static unsigned short num_cache_leaves;
180
181 /* AMD doesn't have CPUID4. Emulate it here to report the same
182 information to the user. This makes some assumptions about the machine:
183 L2 not shared, no SMT etc. that is currently true on AMD CPUs.
184
185 In theory the TLBs could be reported as fake type (they are in "dummy").
186 Maybe later */
187 union l1_cache {
188 struct {
189 unsigned line_size:8;
190 unsigned lines_per_tag:8;
191 unsigned assoc:8;
192 unsigned size_in_kb:8;
193 };
194 unsigned val;
195 };
196
197 union l2_cache {
198 struct {
199 unsigned line_size:8;
200 unsigned lines_per_tag:4;
201 unsigned assoc:4;
202 unsigned size_in_kb:16;
203 };
204 unsigned val;
205 };
206
207 union l3_cache {
208 struct {
209 unsigned line_size:8;
210 unsigned lines_per_tag:4;
211 unsigned assoc:4;
212 unsigned res:2;
213 unsigned size_encoded:14;
214 };
215 unsigned val;
216 };
217
218 static const unsigned short assocs[] = {
219 [1] = 1,
220 [2] = 2,
221 [4] = 4,
222 [6] = 8,
223 [8] = 16,
224 [0xa] = 32,
225 [0xb] = 48,
226 [0xc] = 64,
227 [0xd] = 96,
228 [0xe] = 128,
229 [0xf] = 0xffff /* fully associative - no way to show this currently */
230 };
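/*
 * Example (hypothetical CPUID 0x80000006 EDX value): 0x00408140 decodes
 * through union l3_cache above as line_size == 64, lines_per_tag == 1,
 * assoc == 8 and size_encoded == 16, i.e. assocs[8] == 16 ways and
 * 16 * 512 KB == 8 MB of L3.
 */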
231
232 static const unsigned char levels[] = { 1, 1, 2, 3 };
233 static const unsigned char types[] = { 1, 2, 3, 3 };
234
235 static const enum cache_type cache_type_map[] = {
236 [CTYPE_NULL] = CACHE_TYPE_NOCACHE,
237 [CTYPE_DATA] = CACHE_TYPE_DATA,
238 [CTYPE_INST] = CACHE_TYPE_INST,
239 [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
240 };
241
242 static void
243 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
244 union _cpuid4_leaf_ebx *ebx,
245 union _cpuid4_leaf_ecx *ecx)
246 {
247 unsigned dummy;
248 unsigned line_size, lines_per_tag, assoc, size_in_kb;
249 union l1_cache l1i, l1d;
250 union l2_cache l2;
251 union l3_cache l3;
252 union l1_cache *l1 = &l1d;
253
254 eax->full = 0;
255 ebx->full = 0;
256 ecx->full = 0;
257
258 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
259 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
260
261 switch (leaf) {
262 case 1:
263 l1 = &l1i;
264 fallthrough;
265 case 0:
266 if (!l1->val)
267 return;
268 assoc = assocs[l1->assoc];
269 line_size = l1->line_size;
270 lines_per_tag = l1->lines_per_tag;
271 size_in_kb = l1->size_in_kb;
272 break;
273 case 2:
274 if (!l2.val)
275 return;
276 assoc = assocs[l2.assoc];
277 line_size = l2.line_size;
278 lines_per_tag = l2.lines_per_tag;
279 /* cpu_data has errata corrections for K7 applied */
280 size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
281 break;
282 case 3:
283 if (!l3.val)
284 return;
285 assoc = assocs[l3.assoc];
286 line_size = l3.line_size;
287 lines_per_tag = l3.lines_per_tag;
288 size_in_kb = l3.size_encoded * 512;
289 if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
290 size_in_kb = size_in_kb >> 1;
291 assoc = assoc >> 1;
292 }
293 break;
294 default:
295 return;
296 }
297
298 eax->split.is_self_initializing = 1;
299 eax->split.type = types[leaf];
300 eax->split.level = levels[leaf];
301 eax->split.num_threads_sharing = 0;
302 eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
303
304
305 if (assoc == 0xffff)
306 eax->split.is_fully_associative = 1;
307 ebx->split.coherency_line_size = line_size - 1;
308 ebx->split.ways_of_associativity = assoc - 1;
309 ebx->split.physical_line_partition = lines_per_tag - 1;
310 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
311 (ebx->split.ways_of_associativity + 1) - 1;
312 }
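/*
 * Example for the emulation above (hypothetical values): a 512 KB,
 * 16-way L2 with 64-byte lines yields
 * number_of_sets = 512 * 1024 / 64 / 16 - 1 = 511.
 */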
313
314 #if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
315
316 /*
317 * L3 cache descriptors
318 */
319 static void amd_calc_l3_indices(struct amd_northbridge *nb)
320 {
321 struct amd_l3_cache *l3 = &nb->l3_cache;
322 unsigned int sc0, sc1, sc2, sc3;
323 u32 val = 0;
324
325 pci_read_config_dword(nb->misc, 0x1C4, &val);
326
327 /* calculate subcache sizes */
328 l3->subcaches[0] = sc0 = !(val & BIT(0));
329 l3->subcaches[1] = sc1 = !(val & BIT(4));
330
331 if (boot_cpu_data.x86 == 0x15) {
332 l3->subcaches[0] = sc0 += !(val & BIT(1));
333 l3->subcaches[1] = sc1 += !(val & BIT(5));
334 }
335
336 l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
337 l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
338
339 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
340 }
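/*
 * Example (hypothetical register value): with no subcaches disabled
 * (val == 0) on a family 0x10 part, sc0..sc3 come out as 1, 1, 2 and 2,
 * so l3->indices = (2 << 10) - 1 = 2047.
 */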
341
342 /*
343 * check whether a slot used for disabling an L3 index is occupied.
344 * @nb: northbridge descriptor for the node containing the L3 cache
345 * @slot: slot number (0..1)
346 *
347 * @returns: the disabled index if used or negative value if slot free.
348 */
349 static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
350 {
351 unsigned int reg = 0;
352
353 pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
354
355 /* check whether this slot is activated already */
356 if (reg & (3UL << 30))
357 return reg & 0xfff;
358
359 return -1;
360 }
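/*
 * Example: reg == 0x80000123 has one of bits 31:30 set, so the slot is
 * occupied and the disabled index 0x123 is returned; reg == 0 means the
 * slot is free and -1 is returned.
 */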
361
362 static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
363 unsigned int slot)
364 {
365 int index;
366 struct amd_northbridge *nb = this_leaf->priv;
367
368 index = amd_get_l3_disable_slot(nb, slot);
369 if (index >= 0)
370 return sprintf(buf, "%d\n", index);
371
372 return sprintf(buf, "FREE\n");
373 }
374
375 #define SHOW_CACHE_DISABLE(slot) \
376 static ssize_t \
377 cache_disable_##slot##_show(struct device *dev, \
378 struct device_attribute *attr, char *buf) \
379 { \
380 struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
381 return show_cache_disable(this_leaf, buf, slot); \
382 }
383 SHOW_CACHE_DISABLE(0)
384 SHOW_CACHE_DISABLE(1)
385
386 static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
387 unsigned slot, unsigned long idx)
388 {
389 int i;
390
391 idx |= BIT(30);
392
393 /*
394 * disable index in all 4 subcaches
395 */
396 for (i = 0; i < 4; i++) {
397 u32 reg = idx | (i << 20);
398
399 if (!nb->l3_cache.subcaches[i])
400 continue;
401
402 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
403
404 /*
405 * We need to WBINVD on a core on the node containing the L3 cache
406 * whose indices we are disabling; a simple wbinvd() on the current
407 * CPU is not sufficient.
408 */
409 wbinvd_on_cpu(cpu);
410
411 reg |= BIT(31);
412 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
413 }
414 }
415
416 /*
417 * disable a L3 cache index by using a disable-slot
418 *
419 * @nb: northbridge descriptor for the node containing the L3 cache
420 * @cpu: A CPU on the node containing the L3 cache
421 * @slot: slot number (0..1)
422 * @index: index to disable
423 *
424 * @return: 0 on success, error status on failure
425 */
426 static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
427 unsigned slot, unsigned long index)
428 {
429 int ret = 0;
430
431 /* check if @slot is already used or the index is already disabled */
432 ret = amd_get_l3_disable_slot(nb, slot);
433 if (ret >= 0)
434 return -EEXIST;
435
436 if (index > nb->l3_cache.indices)
437 return -EINVAL;
438
439 /* check whether the other slot has disabled the same index already */
440 if (index == amd_get_l3_disable_slot(nb, !slot))
441 return -EEXIST;
442
443 amd_l3_disable_index(nb, cpu, slot, index);
444
445 return 0;
446 }
447
448 static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
449 const char *buf, size_t count,
450 unsigned int slot)
451 {
452 unsigned long val = 0;
453 int cpu, err = 0;
454 struct amd_northbridge *nb = this_leaf->priv;
455
456 if (!capable(CAP_SYS_ADMIN))
457 return -EPERM;
458
459 cpu = cpumask_first(&this_leaf->shared_cpu_map);
460
461 if (kstrtoul(buf, 10, &val) < 0)
462 return -EINVAL;
463
464 err = amd_set_l3_disable_slot(nb, cpu, slot, val);
465 if (err) {
466 if (err == -EEXIST)
467 pr_warn("L3 slot %d in use/index already disabled!\n",
468 slot);
469 return err;
470 }
471 return count;
472 }
473
474 #define STORE_CACHE_DISABLE(slot) \
475 static ssize_t \
476 cache_disable_##slot##_store(struct device *dev, \
477 struct device_attribute *attr, \
478 const char *buf, size_t count) \
479 { \
480 struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
481 return store_cache_disable(this_leaf, buf, count, slot); \
482 }
483 STORE_CACHE_DISABLE(0)
484 STORE_CACHE_DISABLE(1)
485
486 static ssize_t subcaches_show(struct device *dev,
487 struct device_attribute *attr, char *buf)
488 {
489 struct cacheinfo *this_leaf = dev_get_drvdata(dev);
490 int cpu = cpumask_first(&this_leaf->shared_cpu_map);
491
492 return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
493 }
494
495 static ssize_t subcaches_store(struct device *dev,
496 struct device_attribute *attr,
497 const char *buf, size_t count)
498 {
499 struct cacheinfo *this_leaf = dev_get_drvdata(dev);
500 int cpu = cpumask_first(&this_leaf->shared_cpu_map);
501 unsigned long val;
502
503 if (!capable(CAP_SYS_ADMIN))
504 return -EPERM;
505
506 if (kstrtoul(buf, 16, &val) < 0)
507 return -EINVAL;
508
509 if (amd_set_subcaches(cpu, val))
510 return -EINVAL;
511
512 return count;
513 }
514
515 static DEVICE_ATTR_RW(cache_disable_0);
516 static DEVICE_ATTR_RW(cache_disable_1);
517 static DEVICE_ATTR_RW(subcaches);
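/*
 * These attributes land in the private attribute group attached to the L3
 * cacheinfo node, so on a typical AMD system where the L3 is cache index 3
 * they are expected to appear as, for example:
 *
 *   /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *   /sys/devices/system/cpu/cpu0/cache/index3/subcaches
 *
 * e.g. "echo 123 > .../cache_disable_0" (decimal index) disables L3 index
 * 123 via store_cache_disable(), and reading the file back reports either
 * the disabled index or "FREE".
 */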
518
519 static umode_t
520 cache_private_attrs_is_visible(struct kobject *kobj,
521 struct attribute *attr, int unused)
522 {
523 struct device *dev = kobj_to_dev(kobj);
524 struct cacheinfo *this_leaf = dev_get_drvdata(dev);
525 umode_t mode = attr->mode;
526
527 if (!this_leaf->priv)
528 return 0;
529
530 if ((attr == &dev_attr_subcaches.attr) &&
531 amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
532 return mode;
533
534 if ((attr == &dev_attr_cache_disable_0.attr ||
535 attr == &dev_attr_cache_disable_1.attr) &&
536 amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
537 return mode;
538
539 return 0;
540 }
541
542 static struct attribute_group cache_private_group = {
543 .is_visible = cache_private_attrs_is_visible,
544 };
545
546 static void init_amd_l3_attrs(void)
547 {
548 int n = 1;
549 static struct attribute **amd_l3_attrs;
550
551 if (amd_l3_attrs) /* already initialized */
552 return;
553
554 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
555 n += 2;
556 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
557 n += 1;
558
559 amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
560 if (!amd_l3_attrs)
561 return;
562
563 n = 0;
564 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
565 amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
566 amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
567 }
568 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
569 amd_l3_attrs[n++] = &dev_attr_subcaches.attr;
570
571 cache_private_group.attrs = amd_l3_attrs;
572 }
573
574 const struct attribute_group *
575 cache_get_priv_group(struct cacheinfo *this_leaf)
576 {
577 struct amd_northbridge *nb = this_leaf->priv;
578
579 if (this_leaf->level < 3 || !nb)
580 return NULL;
581
582 if (nb && nb->l3_cache.indices)
583 init_amd_l3_attrs();
584
585 return &cache_private_group;
586 }
587
588 static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
589 {
590 int node;
591
592 /* only for L3, and not in virtualized environments */
593 if (index < 3)
594 return;
595
596 node = topology_die_id(smp_processor_id());
597 this_leaf->nb = node_to_amd_nb(node);
598 if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
599 amd_calc_l3_indices(this_leaf->nb);
600 }
601 #else
602 #define amd_init_l3_cache(x, y)
603 #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
604
605 static int
606 cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
607 {
608 union _cpuid4_leaf_eax eax;
609 union _cpuid4_leaf_ebx ebx;
610 union _cpuid4_leaf_ecx ecx;
611 unsigned edx;
612
613 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
614 if (boot_cpu_has(X86_FEATURE_TOPOEXT))
615 cpuid_count(0x8000001d, index, &eax.full,
616 &ebx.full, &ecx.full, &edx);
617 else
618 amd_cpuid4(index, &eax, &ebx, &ecx);
619 amd_init_l3_cache(this_leaf, index);
620 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
621 cpuid_count(0x8000001d, index, &eax.full,
622 &ebx.full, &ecx.full, &edx);
623 amd_init_l3_cache(this_leaf, index);
624 } else {
625 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
626 }
627
628 if (eax.split.type == CTYPE_NULL)
629 return -EIO; /* better error ? */
630
631 this_leaf->eax = eax;
632 this_leaf->ebx = ebx;
633 this_leaf->ecx = ecx;
634 this_leaf->size = (ecx.split.number_of_sets + 1) *
635 (ebx.split.coherency_line_size + 1) *
636 (ebx.split.physical_line_partition + 1) *
637 (ebx.split.ways_of_associativity + 1);
638 return 0;
639 }
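/*
 * Example of the size computation above (hypothetical leaf): 8191 in
 * number_of_sets (8192 sets), 63 in coherency_line_size (64-byte lines),
 * 0 in physical_line_partition (1 partition) and 15 in
 * ways_of_associativity (16 ways) yield 8192 * 64 * 1 * 16 bytes = 8 MB.
 */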
640
641 static int find_num_cache_leaves(struct cpuinfo_x86 *c)
642 {
643 unsigned int eax, ebx, ecx, edx, op;
644 union _cpuid4_leaf_eax cache_eax;
645 int i = -1;
646
647 if (c->x86_vendor == X86_VENDOR_AMD ||
648 c->x86_vendor == X86_VENDOR_HYGON)
649 op = 0x8000001d;
650 else
651 op = 4;
652
653 do {
654 ++i;
655 /* Do cpuid(op) loop to find out num_cache_leaves */
656 cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
657 cache_eax.full = eax;
658 } while (cache_eax.split.type != CTYPE_NULL);
659 return i;
660 }
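/*
 * On a typical part enumerating separate L1d and L1i plus a unified L2
 * and L3, indices 0-3 report a valid cache type and index 4 reports
 * CTYPE_NULL, so the loop above returns 4.
 */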
661
662 void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
663 {
664 /*
665 * We may have multiple LLCs if L3 caches exist, so check if we
666 * have an L3 cache by looking at the L3 cache CPUID leaf.
667 */
668 if (!cpuid_edx(0x80000006))
669 return;
670
671 if (c->x86 < 0x17) {
672 /* LLC is at the node level. */
673 per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
674 } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
675 /*
676 * LLC is at the core complex level.
677 * Core complex ID is ApicId[3] for these processors.
678 */
679 per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
680 } else {
681 /*
682 * LLC ID is calculated from the number of threads sharing the
683 * cache.
684 */
685 u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
686 u32 llc_index = find_num_cache_leaves(c) - 1;
687
688 cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);
689 if (eax)
690 num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
691
692 if (num_sharing_cache) {
693 int bits = get_count_order(num_sharing_cache);
694
695 per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
696 }
697 }
698 }
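/*
 * Example for the last branch above (hypothetical values): if EAX[25:14]
 * of the highest 0x8000001d leaf reports 15, then 16 threads share the
 * LLC, get_count_order(16) == 4, and cpu_llc_id becomes apicid >> 4.
 */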
699
700 void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
701 {
702 /*
703 * We may have multiple LLCs if L3 caches exist, so check if we
704 * have an L3 cache by looking at the L3 cache CPUID leaf.
705 */
706 if (!cpuid_edx(0x80000006))
707 return;
708
709 /*
710 * LLC is at the core complex level.
711 * Core complex ID is ApicId[3] for these processors.
712 */
713 per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
714 }
715
716 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
717 {
718
719 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
720 num_cache_leaves = find_num_cache_leaves(c);
721 } else if (c->extended_cpuid_level >= 0x80000006) {
722 if (cpuid_edx(0x80000006) & 0xf000)
723 num_cache_leaves = 4;
724 else
725 num_cache_leaves = 3;
726 }
727 }
728
729 void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
730 {
731 num_cache_leaves = find_num_cache_leaves(c);
732 }
733
734 void init_intel_cacheinfo(struct cpuinfo_x86 *c)
735 {
736 /* Cache sizes */
737 unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0;
738 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
739 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
740 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
741 #ifdef CONFIG_SMP
742 unsigned int cpu = c->cpu_index;
743 #endif
744
745 if (c->cpuid_level > 3) {
746 static int is_initialized;
747
748 if (is_initialized == 0) {
749 /* Init num_cache_leaves from boot CPU */
750 num_cache_leaves = find_num_cache_leaves(c);
751 is_initialized++;
752 }
753
754 /*
755 * Whenever possible use cpuid(4), deterministic cache
756 * parameters cpuid leaf to find the cache details
757 */
758 for (i = 0; i < num_cache_leaves; i++) {
759 struct _cpuid4_info_regs this_leaf = {};
760 int retval;
761
762 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
763 if (retval < 0)
764 continue;
765
766 switch (this_leaf.eax.split.level) {
767 case 1:
768 if (this_leaf.eax.split.type == CTYPE_DATA)
769 new_l1d = this_leaf.size/1024;
770 else if (this_leaf.eax.split.type == CTYPE_INST)
771 new_l1i = this_leaf.size/1024;
772 break;
773 case 2:
774 new_l2 = this_leaf.size/1024;
775 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
776 index_msb = get_count_order(num_threads_sharing);
777 l2_id = c->apicid & ~((1 << index_msb) - 1);
778 break;
779 case 3:
780 new_l3 = this_leaf.size/1024;
781 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
782 index_msb = get_count_order(num_threads_sharing);
783 l3_id = c->apicid & ~((1 << index_msb) - 1);
784 break;
785 default:
786 break;
787 }
788 }
789 }
790 /*
791 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
792 * trace cache
793 */
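/*
 * Example of the descriptor walk below (hypothetical register contents):
 * if one of the CPUID(2) output bytes is 0x2c and another is 0x30, the
 * table above contributes 32 KB to l1d and 32 KB to l1i; a 0x00 byte and
 * any register with bit 31 set are skipped.
 */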
794 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
795 /* supports eax=2 call */
796 int j, n;
797 unsigned int regs[4];
798 unsigned char *dp = (unsigned char *)regs;
799 int only_trace = 0;
800
801 if (num_cache_leaves != 0 && c->x86 == 15)
802 only_trace = 1;
803
804 /* Number of times to iterate */
805 n = cpuid_eax(2) & 0xFF;
806
807 for (i = 0 ; i < n ; i++) {
808 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
809
810 /* If bit 31 is set, this is an unknown format */
811 for (j = 0 ; j < 3 ; j++)
812 if (regs[j] & (1 << 31))
813 regs[j] = 0;
814
815 /* Byte 0 is level count, not a descriptor */
816 for (j = 1 ; j < 16 ; j++) {
817 unsigned char des = dp[j];
818 unsigned char k = 0;
819
820 /* look up this descriptor in the table */
821 while (cache_table[k].descriptor != 0) {
822 if (cache_table[k].descriptor == des) {
823 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
824 break;
825 switch (cache_table[k].cache_type) {
826 case LVL_1_INST:
827 l1i += cache_table[k].size;
828 break;
829 case LVL_1_DATA:
830 l1d += cache_table[k].size;
831 break;
832 case LVL_2:
833 l2 += cache_table[k].size;
834 break;
835 case LVL_3:
836 l3 += cache_table[k].size;
837 break;
838 }
839
840 break;
841 }
842
843 k++;
844 }
845 }
846 }
847 }
848
849 if (new_l1d)
850 l1d = new_l1d;
851
852 if (new_l1i)
853 l1i = new_l1i;
854
855 if (new_l2) {
856 l2 = new_l2;
857 #ifdef CONFIG_SMP
858 per_cpu(cpu_llc_id, cpu) = l2_id;
859 per_cpu(cpu_l2c_id, cpu) = l2_id;
860 #endif
861 }
862
863 if (new_l3) {
864 l3 = new_l3;
865 #ifdef CONFIG_SMP
866 per_cpu(cpu_llc_id, cpu) = l3_id;
867 #endif
868 }
869
870 #ifdef CONFIG_SMP
871 /*
872 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
873 * turns means that the only possibility is SMT (as indicated in
874 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
875 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
876 * c->phys_proc_id.
877 */
878 if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
879 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
880 #endif
881
882 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
883
884 if (!l2)
885 cpu_detect_cache_sizes(c);
886 }
887
888 static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
889 struct _cpuid4_info_regs *base)
890 {
891 struct cpu_cacheinfo *this_cpu_ci;
892 struct cacheinfo *this_leaf;
893 int i, sibling;
894
895 /*
896 * For L3, always use the pre-calculated cpu_llc_shared_mask
897 * to derive shared_cpu_map.
898 */
899 if (index == 3) {
900 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
901 this_cpu_ci = get_cpu_cacheinfo(i);
902 if (!this_cpu_ci->info_list)
903 continue;
904 this_leaf = this_cpu_ci->info_list + index;
905 for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
906 if (!cpu_online(sibling))
907 continue;
908 cpumask_set_cpu(sibling,
909 &this_leaf->shared_cpu_map);
910 }
911 }
912 } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
913 unsigned int apicid, nshared, first, last;
914
915 nshared = base->eax.split.num_threads_sharing + 1;
916 apicid = cpu_data(cpu).apicid;
917 first = apicid - (apicid % nshared);
918 last = first + nshared - 1;
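/*
 * Example (hypothetical values): nshared == 16 and apicid == 0x23 give
 * first == 0x20 and last == 0x2f, i.e. the leaf is shared by all online
 * CPUs whose APIC IDs fall into that window.
 */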
919
920 for_each_online_cpu(i) {
921 this_cpu_ci = get_cpu_cacheinfo(i);
922 if (!this_cpu_ci->info_list)
923 continue;
924
925 apicid = cpu_data(i).apicid;
926 if ((apicid < first) || (apicid > last))
927 continue;
928
929 this_leaf = this_cpu_ci->info_list + index;
930
931 for_each_online_cpu(sibling) {
932 apicid = cpu_data(sibling).apicid;
933 if ((apicid < first) || (apicid > last))
934 continue;
935 cpumask_set_cpu(sibling,
936 &this_leaf->shared_cpu_map);
937 }
938 }
939 } else
940 return 0;
941
942 return 1;
943 }
944
945 static void __cache_cpumap_setup(unsigned int cpu, int index,
946 struct _cpuid4_info_regs *base)
947 {
948 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
949 struct cacheinfo *this_leaf, *sibling_leaf;
950 unsigned long num_threads_sharing;
951 int index_msb, i;
952 struct cpuinfo_x86 *c = &cpu_data(cpu);
953
954 if (c->x86_vendor == X86_VENDOR_AMD ||
955 c->x86_vendor == X86_VENDOR_HYGON) {
956 if (__cache_amd_cpumap_setup(cpu, index, base))
957 return;
958 }
959
960 this_leaf = this_cpu_ci->info_list + index;
961 num_threads_sharing = 1 + base->eax.split.num_threads_sharing;
962
963 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
964 if (num_threads_sharing == 1)
965 return;
966
967 index_msb = get_count_order(num_threads_sharing);
968
969 for_each_online_cpu(i)
970 if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
971 struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
972
973 if (i == cpu || !sib_cpu_ci->info_list)
974 continue;/* skip if itself or no cacheinfo */
975 sibling_leaf = sib_cpu_ci->info_list + index;
976 cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
977 cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
978 }
979 }
980
981 static void ci_leaf_init(struct cacheinfo *this_leaf,
982 struct _cpuid4_info_regs *base)
983 {
984 this_leaf->id = base->id;
985 this_leaf->attributes = CACHE_ID;
986 this_leaf->level = base->eax.split.level;
987 this_leaf->type = cache_type_map[base->eax.split.type];
988 this_leaf->coherency_line_size =
989 base->ebx.split.coherency_line_size + 1;
990 this_leaf->ways_of_associativity =
991 base->ebx.split.ways_of_associativity + 1;
992 this_leaf->size = base->size;
993 this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
994 this_leaf->physical_line_partition =
995 base->ebx.split.physical_line_partition + 1;
996 this_leaf->priv = base->nb;
997 }
998
999 int init_cache_level(unsigned int cpu)
1000 {
1001 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
1002
1003 if (!num_cache_leaves)
1004 return -ENOENT;
1005 if (!this_cpu_ci)
1006 return -EINVAL;
1007 this_cpu_ci->num_levels = 3;
1008 this_cpu_ci->num_leaves = num_cache_leaves;
1009 return 0;
1010 }
1011
1012 /*
1013 * The max shared threads number comes from CPUID.4:EAX[25:14] with input
1014 * ECX as cache index. Then right shift apicid by the number's order to get
1015 * cache id for this cache node.
1016 */
1017 static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
1018 {
1019 struct cpuinfo_x86 *c = &cpu_data(cpu);
1020 unsigned long num_threads_sharing;
1021 int index_msb;
1022
1023 num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
1024 index_msb = get_count_order(num_threads_sharing);
1025 id4_regs->id = c->apicid >> index_msb;
1026 }
1027
1028 int populate_cache_leaves(unsigned int cpu)
1029 {
1030 unsigned int idx, ret;
1031 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
1032 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
1033 struct _cpuid4_info_regs id4_regs = {};
1034
1035 for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
1036 ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
1037 if (ret)
1038 return ret;
1039 get_cache_id(cpu, &id4_regs);
1040 ci_leaf_init(this_leaf++, &id4_regs);
1041 __cache_cpumap_setup(cpu, idx, &id4_regs);
1042 }
1043 this_cpu_ci->cpu_map_populated = true;
1044
1045 return 0;
1046 }
1047
1048 /*
1049 * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
1050 *
1051 * Since we are disabling the cache don't allow any interrupts,
1052 * they would run extremely slow and would only increase the pain.
1053 *
1054 * The caller must ensure that local interrupts are disabled and
1055 * are reenabled after cache_enable() has been called.
1056 */
1057 static unsigned long saved_cr4;
1058 static DEFINE_RAW_SPINLOCK(cache_disable_lock);
1059
1060 void cache_disable(void) __acquires(cache_disable_lock)
1061 {
1062 unsigned long cr0;
1063
1064 /*
1065 * Note that this is not ideal
1066 * since the cache is only flushed/disabled for this CPU while the
1067 * MTRRs are changed, but changing this requires more invasive
1068 * changes to the way the kernel boots
1069 */
1070
1071 raw_spin_lock(&cache_disable_lock);
1072
1073 /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
1074 cr0 = read_cr0() | X86_CR0_CD;
1075 write_cr0(cr0);
1076
1077 /*
1078 * Cache flushing is the most time-consuming step when programming
1079 * the MTRRs. Fortunately, as per the Intel Software Development
1080 * Manual, we can skip it if the processor supports cache self-
1081 * snooping.
1082 */
1083 if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
1084 wbinvd();
1085
1086 /* Save value of CR4 and clear Page Global Enable (bit 7) */
1087 if (cpu_feature_enabled(X86_FEATURE_PGE)) {
1088 saved_cr4 = __read_cr4();
1089 __write_cr4(saved_cr4 & ~X86_CR4_PGE);
1090 }
1091
1092 /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
1093 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
1094 flush_tlb_local();
1095
1096 if (cpu_feature_enabled(X86_FEATURE_MTRR))
1097 mtrr_disable();
1098
1099 /* Again, only flush caches if we have to. */
1100 if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
1101 wbinvd();
1102 }
1103
1104 void cache_enable(void) __releases(cache_disable_lock)
1105 {
1106 /* Flush TLBs (no need to flush caches - they are disabled) */
1107 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
1108 flush_tlb_local();
1109
1110 if (cpu_feature_enabled(X86_FEATURE_MTRR))
1111 mtrr_enable();
1112
1113 /* Enable caches */
1114 write_cr0(read_cr0() & ~X86_CR0_CD);
1115
1116 /* Restore value of CR4 */
1117 if (cpu_feature_enabled(X86_FEATURE_PGE))
1118 __write_cr4(saved_cr4);
1119
1120 raw_spin_unlock(&cache_disable_lock);
1121 }
1122
1123 static void cache_cpu_init(void)
1124 {
1125 unsigned long flags;
1126
1127 local_irq_save(flags);
1128 cache_disable();
1129
1130 if (memory_caching_control & CACHE_MTRR)
1131 mtrr_generic_set_state();
1132
1133 if (memory_caching_control & CACHE_PAT)
1134 pat_cpu_init();
1135
1136 cache_enable();
1137 local_irq_restore(flags);
1138 }
1139
1140 static bool cache_aps_delayed_init = true;
1141
1142 void set_cache_aps_delayed_init(bool val)
1143 {
1144 cache_aps_delayed_init = val;
1145 }
1146
1147 bool get_cache_aps_delayed_init(void)
1148 {
1149 return cache_aps_delayed_init;
1150 }
1151
1152 static int cache_rendezvous_handler(void *unused)
1153 {
1154 if (get_cache_aps_delayed_init() || !cpu_online(smp_processor_id()))
1155 cache_cpu_init();
1156
1157 return 0;
1158 }
1159
1160 void __init cache_bp_init(void)
1161 {
1162 mtrr_bp_init();
1163 pat_bp_init();
1164
1165 if (memory_caching_control)
1166 cache_cpu_init();
1167 }
1168
1169 void cache_bp_restore(void)
1170 {
1171 if (memory_caching_control)
1172 cache_cpu_init();
1173 }
1174
1175 static int cache_ap_init(unsigned int cpu)
1176 {
1177 if (!memory_caching_control || get_cache_aps_delayed_init())
1178 return 0;
1179
1180 /*
1181 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
1182 * changed, but this routine will be called in CPU boot time,
1183 * holding the lock breaks it.
1184 *
1185 * This routine is called in two cases:
1186 *
1187 * 1. very early time of software resume, when there absolutely
1188 * isn't MTRR entry changes;
1189 *
1190 * 2. CPU hotadd time. We let mtrr_add/del_page hold cpuhotplug
1191 * lock to prevent MTRR entry changes
1192 */
1193 stop_machine_from_inactive_cpu(cache_rendezvous_handler, NULL,
1194 cpu_callout_mask);
1195
1196 return 0;
1197 }
1198
1199 /*
1200 * Delayed cache initialization for all APs
1201 */
1202 void cache_aps_init(void)
1203 {
1204 if (!memory_caching_control || !get_cache_aps_delayed_init())
1205 return;
1206
1207 stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask);
1208 set_cache_aps_delayed_init(false);
1209 }
1210
1211 static int __init cache_ap_register(void)
1212 {
1213 cpuhp_setup_state_nocalls(CPUHP_AP_CACHECTRL_STARTING,
1214 "x86/cachectrl:starting",
1215 cache_ap_init, NULL);
1216 return 0;
1217 }
1218 core_initcall(cache_ap_register);
1219