/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *		 2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin <hpa@zytor.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

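/*
 * mc_saved_in_initrd keeps the offsets, relative to the start of the initrd
 * image, of the matching microcode patches found during early boot.
 * mc_saved_data later holds kmalloc'ed copies of those patches so they can be
 * reapplied when a CPU is hot added or resumes.
 */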
static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
static struct mc_saved_data {
	unsigned int mc_saved_count;
	struct microcode_intel **mc_saved;
} mc_saved_data;

static enum ucode_state
load_microcode_early(struct microcode_intel **saved,
		     unsigned int num_saved, struct ucode_cpu_info *uci)
{
	struct microcode_intel *ucode_ptr, *new_mc = NULL;
	struct microcode_header_intel *mc_hdr;
	int new_rev, ret, i;

	new_rev = uci->cpu_sig.rev;

	for (i = 0; i < num_saved; i++) {
		ucode_ptr = saved[i];
		mc_hdr	  = (struct microcode_header_intel *)ucode_ptr;

		ret = has_newer_microcode(ucode_ptr,
					  uci->cpu_sig.sig,
					  uci->cpu_sig.pf,
					  new_rev);
		if (!ret)
			continue;

		new_rev = mc_hdr->rev;
		new_mc  = ucode_ptr;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	uci->mc = (struct microcode_intel *)new_mc;
	return UCODE_OK;
}

static inline void
copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
		  unsigned long off, int num_saved)
{
	int i;

	for (i = 0; i < num_saved; i++)
		mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
}

#ifdef CONFIG_X86_32
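/*
 * On 32-bit, early loading runs before paging is enabled, so the virtual
 * pointers kept in mc_saved_data have to be translated with __pa_nodebug()
 * before they can be dereferenced.
 */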
static void
microcode_phys(struct microcode_intel **mc_saved_tmp,
	       struct mc_saved_data *mc_saved_data)
{
	int i;
	struct microcode_intel ***mc_saved;

	mc_saved = (struct microcode_intel ***)
		   __pa_nodebug(&mc_saved_data->mc_saved);
	for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
		struct microcode_intel *p;

		p = *(struct microcode_intel **)
			__pa_nodebug(mc_saved_data->mc_saved + i);
		mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
	}
}
#endif

static enum ucode_state
load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
	       unsigned long initrd_start, struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
	unsigned int count = mc_saved_data->mc_saved_count;

	if (!mc_saved_data->mc_saved) {
		copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);

		return load_microcode_early(mc_saved_tmp, count, uci);
	} else {
#ifdef CONFIG_X86_32
		microcode_phys(mc_saved_tmp, mc_saved_data);
		return load_microcode_early(mc_saved_tmp, count, uci);
#else
		return load_microcode_early(mc_saved_data->mc_saved,
					    count, uci);
#endif
	}
}

/*
 * Given a CPU signature and a microcode patch, this function checks whether
 * the patch's family and model match those of the CPU.
 */
static enum ucode_state
matching_model_microcode(struct microcode_header_intel *mc_header,
			unsigned long sig)
{
	unsigned int fam, model;
	unsigned int fam_ucode, model_ucode;
	struct extended_sigtable *ext_header;
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	int ext_sigcount, i;
	struct extended_signature *ext_sig;

	fam   = __x86_family(sig);
	model = x86_model(sig);

	fam_ucode   = __x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return UCODE_OK;

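	/*
	 * An extended signature table, if present, follows the microcode
	 * data: total_size = MC_HEADER_SIZE + data_size + extended table.
	 */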
	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return UCODE_NFOUND;

	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode   = __x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return UCODE_OK;

		ext_sig++;
	}
	return UCODE_NFOUND;
}

static int
save_microcode(struct mc_saved_data *mc_saved_data,
	       struct microcode_intel **mc_saved_src,
	       unsigned int mc_saved_count)
{
	int i, j;
	struct microcode_intel **saved_ptr;
	int ret;

	if (!mc_saved_count)
		return -EINVAL;

	/*
	 * Copy new microcode data.
	 */
	saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
	if (!saved_ptr)
		return -ENOMEM;

	for (i = 0; i < mc_saved_count; i++) {
		struct microcode_header_intel *mc_hdr;
		struct microcode_intel *mc;
		unsigned long size;

		if (!mc_saved_src[i]) {
			ret = -EINVAL;
			goto err;
		}

		mc     = mc_saved_src[i];
		mc_hdr = &mc->hdr;
		size   = get_totalsize(mc_hdr);

		saved_ptr[i] = kmalloc(size, GFP_KERNEL);
		if (!saved_ptr[i]) {
			ret = -ENOMEM;
			goto err;
		}

		memcpy(saved_ptr[i], mc, size);
	}

	/*
	 * Point to newly saved microcode.
	 */
	mc_saved_data->mc_saved = saved_ptr;
	mc_saved_data->mc_saved_count = mc_saved_count;

	return 0;

err:
	for (j = 0; j <= i; j++)
		kfree(saved_ptr[j]);
	kfree(saved_ptr);

	return ret;
}

/*
 * A microcode patch in ucode_ptr is saved into mc_saved
 * - if it has a matching signature and a newer revision than an existing
 *   patch in mc_saved,
 * - or if it is a newly discovered microcode patch.
 *
 * The microcode patch must have the same model as the CPU.
 *
 * Returns: the updated number @num_saved of saved microcode patches.
 */
static unsigned int _save_mc(struct microcode_intel **mc_saved,
			     u8 *ucode_ptr, unsigned int num_saved)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	unsigned int sig, pf;
	int found = 0, i;

	mc_hdr = (struct microcode_header_intel *)ucode_ptr;

	for (i = 0; i < num_saved; i++) {
		mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
		sig	     = mc_saved_hdr->sig;
		pf	     = mc_saved_hdr->pf;

		if (!find_matching_signature(ucode_ptr, sig, pf))
			continue;

		found = 1;

		if (mc_hdr->rev <= mc_saved_hdr->rev)
			continue;

		/*
		 * Found an older ucode saved earlier. Replace it with
		 * this newer one.
		 */
		mc_saved[i] = (struct microcode_intel *)ucode_ptr;
		break;
	}

	/* Newly detected microcode, save it to memory. */
	if (i >= num_saved && !found)
		mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;

	return num_saved;
}

/*
 * Get microcode matching the BSP's model. Only CPUs with the same model as
 * the BSP can stay in the platform.
 */
static enum ucode_state __init
get_matching_model_microcode(int cpu, unsigned long start,
			     void *data, size_t size,
			     struct mc_saved_data *mc_saved_data,
			     unsigned long *mc_saved_in_initrd,
			     struct ucode_cpu_info *uci)
{
	u8 *ucode_ptr = data;
	unsigned int leftover = size;
	enum ucode_state state = UCODE_OK;
	unsigned int mc_size;
	struct microcode_header_intel *mc_header;
	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
	unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
	int i;

	while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {

		if (leftover < sizeof(mc_header))
			break;

		mc_header = (struct microcode_header_intel *)ucode_ptr;

		mc_size = get_totalsize(mc_header);
		if (!mc_size || mc_size > leftover ||
			microcode_sanity_check(ucode_ptr, 0) < 0)
			break;

		leftover -= mc_size;

		/*
		 * Since APs with the same family and model as the BSP may boot
		 * in the platform, we need to find and save microcode patches
		 * with the same family and model as the BSP.
		 */
		if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
			 UCODE_OK) {
			ucode_ptr += mc_size;
			continue;
		}

		mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);

		ucode_ptr += mc_size;
	}

	if (leftover) {
		state = UCODE_ERROR;
		goto out;
	}

	if (mc_saved_count == 0) {
		state = UCODE_NFOUND;
		goto out;
	}

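	/*
	 * Store offsets relative to the start of the initrd image rather than
	 * raw pointers; they are turned back into pointers once the initrd's
	 * final mapping is known (see copy_initrd_ptrs()).
	 */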
	for (i = 0; i < mc_saved_count; i++)
		mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;

	mc_saved_data->mc_saved_count = mc_saved_count;
out:
	return state;
}

static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig;
	unsigned int eax, ebx, ecx, edx;

	csig.sig = 0;
	csig.pf = 0;
	csig.rev = 0;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = __x86_family(csig.sig);
	model  = x86_model(csig.sig);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
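		/* Platform ID is in bits 52:50; pf is a mask with that bit set. */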
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}
	native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);

	/* As documented in the SDM: Do a CPUID 1 here */
	sync_core();

	/* get the current revision from MSR 0x8B */
	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);

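	/* The update revision is reported in the high dword (EDX). */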
	csig.rev = val[1];

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}

static void show_saved_mc(void)
{
#ifdef DEBUG
	int i, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;

	if (mc_saved_data.mc_saved_count == 0) {
		pr_debug("no microcode data saved.\n");
		return;
	}
	pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		int ext_sigcount;
		struct extended_signature *ext_sig;

		mc_saved_header = (struct microcode_header_intel *)
				  mc_saved_data.mc_saved[i];
		sig = mc_saved_header->sig;
		pf = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		total_size = get_totalsize(mc_saved_header);
		data_size = get_datasize(mc_saved_header);
		date = mc_saved_header->date;

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}

	}
#endif
}

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
/*
 * Save this mc into mc_saved_data so that it will be loaded early when a CPU
 * is hot added or resumes.
 *
 * Make sure this mc is a valid microcode patch before calling this function.
 */
int save_mc_for_early(u8 *mc)
{
	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
	unsigned int mc_saved_count_init;
	unsigned int mc_saved_count;
	struct microcode_intel **mc_saved;
	int ret = 0;
	int i;

	/*
	 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
	 * hotplug.
	 */
	mutex_lock(&x86_cpu_microcode_mutex);

	mc_saved_count_init = mc_saved_data.mc_saved_count;
	mc_saved_count = mc_saved_data.mc_saved_count;
	mc_saved = mc_saved_data.mc_saved;

	if (mc_saved && mc_saved_count)
		memcpy(mc_saved_tmp, mc_saved,
		       mc_saved_count * sizeof(struct microcode_intel *));
	/*
	 * Save the microcode patch mc in mc_saved_tmp if it's a newer
	 * version.
	 */
	mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);

	/*
	 * Save the mc_saved_tmp array in the global mc_saved_data.
	 */
	ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
	if (ret) {
		pr_err("Cannot save microcode patch.\n");
		goto out;
	}

	show_saved_mc();

	/*
	 * Free old saved microcode data.
	 */
	if (mc_saved) {
		for (i = 0; i < mc_saved_count_init; i++)
			kfree(mc_saved[i]);
		kfree(mc_saved);
	}

out:
	mutex_unlock(&x86_cpu_microcode_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(save_mc_for_early);
#endif

static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
{
#ifdef CONFIG_X86_64
	unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
	unsigned int family, model, stepping;
	char name[30];

	native_cpuid(&eax, &ebx, &ecx, &edx);

	family   = __x86_family(eax);
	model    = x86_model(eax);
	stepping = eax & 0xf;

	sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);

	return get_builtin_firmware(cp, name);
#else
	return false;
#endif
}

static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
static __init enum ucode_state
scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
	       unsigned long start, unsigned long size,
	       struct ucode_cpu_info *uci)
{
	struct cpio_data cd;
	long offset = 0;
#ifdef CONFIG_X86_32
	char *p = (char *)__pa_nodebug(ucode_name);
#else
	char *p = ucode_name;
#endif

	cd.data = NULL;
	cd.size = 0;

	/* try built-in microcode if no initrd */
	if (!size) {
		if (!load_builtin_intel_microcode(&cd))
			return UCODE_ERROR;
	} else {
		cd = find_cpio_data(p, (void *)start, size, &offset);
		if (!cd.data)
			return UCODE_ERROR;
	}

	return get_matching_model_microcode(0, start, cd.data, cd.size,
					    mc_saved_data, initrd, uci);
}

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	int cpu = smp_processor_id();

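	/*
	 * The header date is BCD mmddyyyy: month in bits 31-24, day in 23-16,
	 * year in 15-0.
	 */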
	pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		cpu,
		uci->cpu_sig.rev,
		date & 0xffff,
		date >> 24,
		(date >> 16) & 0xff);
}

#ifdef CONFIG_X86_32

static int delay_ucode_info;
static int current_mc_date;

/*
 * Print the early updated ucode info after printk() works. This is a delayed
 * info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we cannot call printk() yet. Keep the microcode patch number
 * in mc_saved_data.mc_saved and delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc_intel;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc_intel = uci->mc;
	if (mc_intel == NULL)
		return;

	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc_intel->hdr.date;
}
#else

/*
 * Flush the global TLB. We only do this on x86_64, where paging has already
 * been enabled and PGE should be enabled as well.
 */
static inline void flush_tlb_early(void)
{
	__native_flush_tlb_global_irq_disabled();
}

static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc_intel;

	mc_intel = uci->mc;
	if (mc_intel == NULL)
		return;

	print_ucode_info(uci, mc_intel->hdr.date);
}
#endif

static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc_intel;
	unsigned int val[2];

	mc_intel = uci->mc;
	if (mc_intel == NULL)
		return 0;

	/* write microcode via MSR 0x79 */
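	/*
	 * MSR_IA32_UCODE_WRITE takes the 64-bit linear address of the update
	 * data: low 32 bits in EAX, high 32 bits in EDX. The double shift
	 * avoids an undefined 32-bit shift when unsigned long is 32 bits.
	 */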
	native_wrmsr(MSR_IA32_UCODE_WRITE,
	      (unsigned long) mc_intel->bits,
	      (unsigned long) mc_intel->bits >> 16 >> 16);
	native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);

	/* As documented in the SDM: Do a CPUID 1 here */
	sync_core();

	/* get the current revision from MSR 0x8B */
	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
	if (val[1] != mc_intel->hdr.rev)
		return -1;

#ifdef CONFIG_X86_64
	/* Flush global TLB. This is a precaution. */
	flush_tlb_early();
#endif
	uci->cpu_sig.rev = val[1];

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc_intel->hdr.date);

	return 0;
}

/*
 * This function converts microcode patch offsets previously stored in
 * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
 */
int __init save_microcode_in_initrd_intel(void)
{
	unsigned int count = mc_saved_data.mc_saved_count;
	struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
	int ret = 0;

	if (count == 0)
		return ret;

	copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
	ret = save_microcode(&mc_saved_data, mc_saved, count);
	if (ret)
		pr_err("Cannot save microcode patches from initrd.\n");

	show_saved_mc();

	return ret;
}

static void __init
_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
		      unsigned long *initrd,
		      unsigned long start, unsigned long size)
{
	struct ucode_cpu_info uci;
	enum ucode_state ret;

	collect_cpu_info_early(&uci);

	ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
	if (ret != UCODE_OK)
		return;

	ret = load_microcode(mc_saved_data, initrd, start, &uci);
	if (ret != UCODE_OK)
		return;

	apply_microcode_early(&uci, true);
}

void __init load_ucode_intel_bsp(void)
{
	u64 start, size;
#ifdef CONFIG_X86_32
	struct boot_params *p;

	p	= (struct boot_params *)__pa_nodebug(&boot_params);
	size	= p->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use initrd_start
	 * because it is not set that early yet.
	 */
	start	= (size ? p->hdr.ramdisk_image : 0);

	_load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
			      (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
			      start, size);
#else
	size	= boot_params.hdr.ramdisk_size;
	start	= (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);

	_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
#endif
}

void load_ucode_intel_ap(void)
{
	struct mc_saved_data *mc_saved_data_p;
	struct ucode_cpu_info uci;
	unsigned long *mc_saved_in_initrd_p;
	enum ucode_state ret;
#ifdef CONFIG_X86_32

	mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
	mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
#else
	mc_saved_in_initrd_p = mc_saved_in_initrd;
	mc_saved_data_p = &mc_saved_data;
#endif

	/*
	 * If there is no valid ucode previously saved in memory, no need to
	 * update ucode on this AP.
	 */
	if (mc_saved_data_p->mc_saved_count == 0)
		return;

	collect_cpu_info_early(&uci);
	ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
			     get_initrd_start_addr(), &uci);

	if (ret != UCODE_OK)
		return;

	apply_microcode_early(&uci, true);
}

void reload_ucode_intel(void)
{
	struct ucode_cpu_info uci;
	enum ucode_state ret;

	if (!mc_saved_data.mc_saved_count)
		return;

	collect_cpu_info_early(&uci);

	ret = load_microcode_early(mc_saved_data.mc_saved,
				   mc_saved_data.mc_saved_count, &uci);
	if (ret != UCODE_OK)
		return;

	apply_microcode_early(&uci, false);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;
	pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
		cpu_num, csig->sig, csig->pf, csig->rev);

	return 0;
}

/*
 * return 0 - no update found
 * return 1 - found update
 */
static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
{
	struct cpu_signature cpu_sig;
	unsigned int csig, cpf, crev;

	collect_cpu_info(cpu, &cpu_sig);

	csig = cpu_sig.sig;
	cpf = cpu_sig.pf;
	crev = cpu_sig.rev;

	return has_newer_microcode(mc_intel, csig, cpf, crev);
}

static int apply_microcode_intel(int cpu)
{
	struct microcode_intel *mc_intel;
	struct ucode_cpu_info *uci;
	unsigned int val[2];
	int cpu_num = raw_smp_processor_id();
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);

	uci = ucode_cpu_info + cpu;
	mc_intel = uci->mc;

	/* We should bind the task to the CPU */
	BUG_ON(cpu_num != cpu);

	if (mc_intel == NULL)
		return 0;

	/*
	 * Microcode on this CPU could have been updated earlier. Only apply
	 * the microcode patch in mc_intel when it is newer than the one on
	 * this CPU.
	 */
	if (get_matching_mc(mc_intel, cpu) == 0)
		return 0;

	/* write microcode via MSR 0x79 */
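	/* Low 32 bits of the patch address go in EAX, high 32 bits in EDX. */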
	wrmsr(MSR_IA32_UCODE_WRITE,
	      (unsigned long) mc_intel->bits,
	      (unsigned long) mc_intel->bits >> 16 >> 16);
	wrmsr(MSR_IA32_UCODE_REV, 0, 0);

	/* As documented in the SDM: Do a CPUID 1 here */
	sync_core();

	/* get the current revision from MSR 0x8B */
	rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);

	if (val[1] != mc_intel->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu_num, mc_intel->hdr.rev);
		return -1;
	}
	pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
		cpu_num, val[1],
		mc_intel->hdr.date & 0xffff,
		mc_intel->hdr.date >> 24,
		(mc_intel->hdr.date >> 16) & 0xff);

	uci->cpu_sig.rev = val[1];
	c->microcode = val[1];

	return 0;
}

static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
				int (*get_ucode_data)(void *, const void *, size_t))
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
	enum ucode_state state = UCODE_OK;
	unsigned int curr_mc_size = 0;
	unsigned int csig, cpf;

	while (leftover) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size;

		if (leftover < sizeof(mc_header)) {
			pr_err("error! Truncated header in microcode data file\n");
			break;
		}

		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
			break;

		mc_size = get_totalsize(&mc_header);
		if (!mc_size || mc_size > leftover) {
			pr_err("error! Bad data in microcode data file\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc  = mc;
			mc = NULL;	/* trigger new vmalloc */
		}

		ucode_ptr += mc_size;
		leftover  -= mc_size;
	}

	vfree(mc);

	if (leftover) {
		vfree(new_mc);
		state = UCODE_ERROR;
		goto out;
	}

	if (!new_mc) {
		state = UCODE_NFOUND;
		goto out;
	}

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early microcode loading is supported, save this mc into permanent
	 * memory so it will be loaded early when a CPU is hot added or
	 * resumes.
	 */
	save_mc_for_early(new_mc);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);
out:
	return state;
}

static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	char name[30];
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	enum ucode_state ret;

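	/* Blobs are named intel-ucode/<family>-<model>-<stepping>, all in hex. */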
	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_mask);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	ret = generic_load_microcode(cpu, (void *)firmware->data,
				     firmware->size, &get_ucode_fw);

	release_firmware(firmware);

	return ret;
}

static int get_ucode_user(void *to, const void *from, size_t n)
{
	return copy_from_user(to, from, n);
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}

static void microcode_fini_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	vfree(uci->mc);
	uci->mc = NULL;
}

static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user		  = request_microcode_user,
	.request_microcode_fw             = request_microcode_fw,
	.collect_cpu_info                 = collect_cpu_info,
	.apply_microcode                  = apply_microcode_intel,
	.microcode_fini_cpu               = microcode_fini_cpu,
};

struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	return &microcode_intel_ops;
}