/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *               2006      Shaohua Li <shaohua.li@intel.com>
 *
 * This driver allows updating the microcode on Intel processors
 * belonging to the IA-32 family - PentiumPro, Pentium II,
 * Pentium III, Xeon, Pentium 4, etc.
 *
 * Reference: Section 8.11 of Volume 3a, IA-32 Intel(R) Architecture
 * Software Developer's Manual
 * Order Number 253668 or free download from:
 *
 * http://developer.intel.com/design/pentium4/manuals/253668.htm
 *
 * For more information, go to http://www.urbanmyth.org/microcode
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <xen/cpu.h>
#include <xen/lib.h>
#include <xen/kernel.h>
#include <xen/init.h>
#include <xen/notifier.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/spinlock.h>
#include <xen/tasklet.h>
#include <xen/guest_access.h>
#include <xen/earlycpio.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/microcode.h>

static module_t __initdata ucode_mod;
static void *(*__initdata ucode_mod_map)(const module_t *);
static signed int __initdata ucode_mod_idx;
static bool_t __initdata ucode_mod_forced;

/*
 * If we scan the initramfs.cpio for the early microcode and find it,
 * then 'ucode_blob' will contain the pointer and the size of said blob.
 * It is allocated from Xen's heap memory.
 */
struct ucode_mod_blob {
    void *data;
    size_t size;
};

static struct ucode_mod_blob __initdata ucode_blob;
/*
 * By default we will NOT parse the multiboot modules to see if there is
 * a cpio image with the microcode images.
 */
static bool_t __initdata ucode_scan;

void __init microcode_set_module(unsigned int idx)
{
    ucode_mod_idx = idx;
    ucode_mod_forced = 1;
}

/*
 * The format is '[<integer>|scan]'. Both options are optional.
 * If the EFI has forced which of the multiboot payloads is to be used,
 * no parsing will be attempted.
 */
static int __init parse_ucode(const char *s)
{
    const char *q = NULL;

    if ( ucode_mod_forced ) /* Forced by EFI */
        return 0;

    if ( !strncmp(s, "scan", 4) )
        ucode_scan = 1;
    else
        ucode_mod_idx = simple_strtol(s, &q, 0);

    return (q && *q) ? -EINVAL : 0;
}
custom_param("ucode", parse_ucode);
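
/*
 * Illustration (not part of the original source): on the hypervisor command
 * line this option either selects a specific multiboot module or requests
 * scanning, e.g. with GRUB-style multiboot entries:
 *
 *   multiboot /xen.gz ucode=scan    # search the modules for a cpio blob
 *   multiboot /xen.gz ucode=2       # module #2 is the raw microcode file
 *   multiboot /xen.gz ucode=-1      # negative index counts back from the
 *                                   # last module (see microcode_grab_module)
 *
 * The bootloader syntax shown is an assumption for illustration; only the
 * 'ucode=' values themselves follow from parse_ucode() above.
 */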

/*
 * 8MB ought to be enough for any early microcode blob.
 */
#define MAX_EARLY_CPIO_MICROCODE (8 << 20)

void __init microcode_scan_module(
    unsigned long *module_map,
    const multiboot_info_t *mbi,
    void *(*bootmap)(const module_t *))
{
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    uint64_t *_blob_start;
    unsigned long _blob_size;
    struct cpio_data cd;
    long offset;
    const char *p = NULL;
    int i;

    ucode_blob.size = 0;
    if ( !ucode_scan )
        return;

    if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
        p = "kernel/x86/microcode/AuthenticAMD.bin";
    else if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
        p = "kernel/x86/microcode/GenuineIntel.bin";
    else
        return;

    /*
     * Try all modules and see which of them could be the microcode blob.
     */
    for ( i = 1 /* Ignore dom0 kernel */; i < mbi->mods_count; i++ )
    {
        if ( !test_bit(i, module_map) )
            continue;

        _blob_start = bootmap(&mod[i]);
        _blob_size = mod[i].mod_end;
        if ( !_blob_start )
        {
            printk("Could not map multiboot module #%d (size: %ld)\n",
                   i, _blob_size);
            continue;
        }
        cd.data = NULL;
        cd.size = 0;
        cd = find_cpio_data(p, _blob_start, _blob_size, &offset /* ignore */);
        if ( cd.data )
        {
            /*
             * This is an arbitrary check - it would be sad if the blob
             * consumed most of the memory and did not allow guests
             * to launch.
             */
            if ( cd.size > MAX_EARLY_CPIO_MICROCODE )
            {
                printk("Multiboot %d microcode payload too big! (%ld, we can do %d)\n",
                       i, cd.size, MAX_EARLY_CPIO_MICROCODE);
                goto err;
            }
            ucode_blob.size = cd.size;
            ucode_blob.data = xmalloc_bytes(cd.size);
            if ( !ucode_blob.data )
                cd.data = NULL;
            else
                memcpy(ucode_blob.data, cd.data, cd.size);
        }
        bootmap(NULL);
        if ( cd.data )
            break;
    }
    return;
err:
    bootmap(NULL);
}
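
/*
 * Illustration (not part of the original source): the cpio archive that
 * microcode_scan_module() looks for uses the same layout as Linux's early
 * microcode, i.e. the vendor blob stored at
 * kernel/x86/microcode/GenuineIntel.bin (or AuthenticAMD.bin).  A sketch of
 * how such a multiboot module might be produced, assuming a standard cpio(1)
 * tool and a typical firmware location:
 *
 *   mkdir -p kernel/x86/microcode
 *   cat /lib/firmware/intel-ucode/* > kernel/x86/microcode/GenuineIntel.bin
 *   find kernel | cpio -o -H newc > ucode.cpio
 *
 * The /lib/firmware path is an assumption for illustration; only the
 * in-archive path comes from the code above.
 */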

void __init microcode_grab_module(
    unsigned long *module_map,
    const multiboot_info_t *mbi,
    void *(*map)(const module_t *))
{
    module_t *mod = (module_t *)__va(mbi->mods_addr);

    if ( ucode_mod_idx < 0 )
        ucode_mod_idx += mbi->mods_count;
    if ( ucode_mod_idx <= 0 || ucode_mod_idx >= mbi->mods_count ||
         !__test_and_clear_bit(ucode_mod_idx, module_map) )
        goto scan;
    ucode_mod = mod[ucode_mod_idx];
    ucode_mod_map = map;
scan:
    if ( ucode_scan )
        microcode_scan_module(module_map, mbi, map);
}

const struct microcode_ops *microcode_ops;

static DEFINE_SPINLOCK(microcode_mutex);

DEFINE_PER_CPU(struct ucode_cpu_info, ucode_cpu_info);

struct microcode_info {
    unsigned int cpu;
    uint32_t buffer_size;
    int error;
    char buffer[1];
};

static void __microcode_fini_cpu(unsigned int cpu)
{
    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);

    xfree(uci->mc.mc_valid);
    memset(uci, 0, sizeof(*uci));
}

static void microcode_fini_cpu(unsigned int cpu)
{
    spin_lock(&microcode_mutex);
    __microcode_fini_cpu(cpu);
    spin_unlock(&microcode_mutex);
}

int microcode_resume_cpu(unsigned int cpu)
{
    int err;
    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
    struct cpu_signature nsig;
    unsigned int cpu2;

    if ( !microcode_ops )
        return 0;

    spin_lock(&microcode_mutex);

    err = microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
    if ( err )
    {
        __microcode_fini_cpu(cpu);
        spin_unlock(&microcode_mutex);
        return err;
    }

    if ( uci->mc.mc_valid )
    {
        err = microcode_ops->microcode_resume_match(cpu, uci->mc.mc_valid);
        if ( err >= 0 )
        {
            if ( err )
                err = microcode_ops->apply_microcode(cpu);
            spin_unlock(&microcode_mutex);
            return err;
        }
    }

    nsig = uci->cpu_sig;
    __microcode_fini_cpu(cpu);
    uci->cpu_sig = nsig;

    err = -EIO;
    for_each_online_cpu ( cpu2 )
    {
        uci = &per_cpu(ucode_cpu_info, cpu2);
        if ( uci->mc.mc_valid &&
             microcode_ops->microcode_resume_match(cpu, uci->mc.mc_valid) > 0 )
        {
            err = microcode_ops->apply_microcode(cpu);
            break;
        }
    }

    __microcode_fini_cpu(cpu);
    spin_unlock(&microcode_mutex);

    return err;
}

static int microcode_update_cpu(const void *buf, size_t size)
{
    int err;
    unsigned int cpu = smp_processor_id();
    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);

    spin_lock(&microcode_mutex);

    err = microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
    if ( likely(!err) )
        err = microcode_ops->cpu_request_microcode(cpu, buf, size);
    else
        __microcode_fini_cpu(cpu);

    spin_unlock(&microcode_mutex);

    return err;
}

static long do_microcode_update(void *_info)
{
    struct microcode_info *info = _info;
    int error;

    BUG_ON(info->cpu != smp_processor_id());

    error = microcode_update_cpu(info->buffer, info->buffer_size);
    if ( error )
        info->error = error;

    info->cpu = cpumask_next(info->cpu, &cpu_online_map);
    if ( info->cpu < nr_cpu_ids )
        return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);

    error = info->error;
    xfree(info);
    return error;
}

int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void) buf, unsigned long len)
{
    int ret;
    struct microcode_info *info;

    if ( len != (uint32_t)len )
        return -E2BIG;

    if ( microcode_ops == NULL )
        return -EINVAL;

    info = xmalloc_bytes(sizeof(*info) + len);
    if ( info == NULL )
        return -ENOMEM;

    ret = copy_from_guest(info->buffer, buf, len);
    if ( ret != 0 )
    {
        xfree(info);
        return ret;
    }

    info->buffer_size = len;
    info->error = 0;
    info->cpu = cpumask_first(&cpu_online_map);

    if ( microcode_ops->start_update )
    {
        ret = microcode_ops->start_update();
        if ( ret != 0 )
        {
            xfree(info);
            return ret;
        }
    }

    return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
}
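
/*
 * Note (not part of the original source): microcode_update() is the handler
 * behind the XENPF_microcode_update platform hypercall, with dom0 passing a
 * guest handle to the raw vendor microcode image plus its length.  A rough
 * sketch of the dispatch, assuming the usual xen_platform_op layout (the
 * real code lives in platform_hypercall.c and may differ in detail, e.g. it
 * also performs an XSM check):
 *
 *   case XENPF_microcode_update:
 *       ret = microcode_update(
 *           guest_handle_cast(op->u.microcode.data, const_void),
 *           op->u.microcode.length);
 *       break;
 */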

static int __init microcode_init(void)
{
    /*
     * At this point, all CPUs should have updated their microcode
     * via the early_microcode_* paths, so free the microcode blob.
     */
    if ( ucode_blob.size )
    {
        xfree(ucode_blob.data);
        ucode_blob.size = 0;
        ucode_blob.data = NULL;
    }
    else if ( ucode_mod.mod_end )
    {
        ucode_mod_map(NULL);
        ucode_mod.mod_end = 0;
    }

    return 0;
}
__initcall(microcode_init);

static int microcode_percpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_DEAD:
        microcode_fini_cpu(cpu);
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block microcode_percpu_nfb = {
    .notifier_call = microcode_percpu_callback,
};

int __init early_microcode_update_cpu(bool start_update)
{
    int rc = 0;
    void *data = NULL;
    size_t len;

    if ( ucode_blob.size )
    {
        len = ucode_blob.size;
        data = ucode_blob.data;
    }
    else if ( ucode_mod.mod_end )
    {
        len = ucode_mod.mod_end;
        data = ucode_mod_map(&ucode_mod);
    }
    if ( data )
    {
        if ( start_update && microcode_ops->start_update )
            rc = microcode_ops->start_update();

        if ( rc )
            return rc;

        return microcode_update_cpu(data, len);
    }
    else
        return -ENOMEM;
}

int __init early_microcode_init(void)
{
    int rc;

    rc = microcode_init_intel();
    if ( rc )
        return rc;

    rc = microcode_init_amd();
    if ( rc )
        return rc;

    if ( microcode_ops )
    {
        if ( ucode_mod.mod_end || ucode_blob.size )
            rc = early_microcode_update_cpu(true);

        register_cpu_notifier(&microcode_percpu_nfb);
    }

    return rc;
}