/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 * Fixes: Borislav Petkov <bp@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/setup.h>
#include <asm/microcode_amd.h>

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd before jettisoning its contents.
 */
static u8 *container;
static size_t container_size;

static u32 ucode_new_rev;
u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;

static struct cpio_data ucode_cpio;

/*
 * Microcode patch container file is prepended to the initrd in cpio format.
 * See Documentation/x86/early-microcode.txt
 */
static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";

static struct cpio_data __init find_ucode_in_initrd(void)
{
	long offset = 0;
	char *path;
	void *start;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *p;

	/*
	 * On 32-bit, early load occurs before paging is turned on so we need
	 * to use physical addresses.
	 */
	p = (struct boot_params *)__pa_nodebug(&boot_params);
	path = (char *)__pa_nodebug(ucode_path);
	start = (void *)p->hdr.ramdisk_image;
	size = p->hdr.ramdisk_size;
#else
	path = ucode_path;
	start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
	size = boot_params.hdr.ramdisk_size;
#endif

	return find_cpio_data(path, start, size, &offset);
}

static size_t compute_container_size(u8 *data, u32 total_size)
{
	size_t size = 0;
	u32 *header = (u32 *)data;

	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return size;

	size = header[2] + CONTAINER_HDR_SZ;
	total_size -= size;
	data += size;

	while (total_size) {
		u16 patch_size;

		header = (u32 *)data;

		if (header[0] != UCODE_UCODE_TYPE)
			break;

		/*
		 * Sanity-check patch size.
		 */
		patch_size = header[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		size += patch_size + SECTION_HDR_SIZE;
		data += patch_size + SECTION_HDR_SIZE;
		total_size -= patch_size + SECTION_HDR_SIZE;
	}

	return size;
}
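
/*
 * For reference, the container layout that find_ucode_in_initrd(),
 * compute_container_size() and apply_ucode_in_initrd() all walk, as implied
 * by the header checks in this file (a sketch; the authoritative definitions
 * live in <asm/microcode_amd.h>):
 *
 *	u32 magic			UCODE_MAGIC
 *	u32 type			UCODE_EQUIV_CPU_TABLE_TYPE
 *	u32 size			equiv table size in bytes
 *					(these three words make up
 *					 CONTAINER_HDR_SZ)
 *	struct equiv_cpu_entry[]	<size> bytes
 *
 *	then one or more patch sections, each:
 *	u32 type			UCODE_UCODE_TYPE
 *	u32 size			patch size in bytes
 *					(these two words make up
 *					 SECTION_HDR_SIZE)
 *	patch data			a struct microcode_amd
 *
 * Several such containers may be appended back to back;
 * compute_container_size() measures a single one so the patch loop in
 * apply_ucode_in_initrd() can stop at its end.
 */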

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 */
static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
	struct equiv_cpu_entry *eq;
	size_t *cont_sz;
	u32 *header;
	u8 *data, **cont;
	u8 (*patch)[PATCH_MAX_SIZE];
	u16 eq_id = 0;
	int offset, left;
	u32 rev, eax, ebx, ecx, edx;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	cont_sz = (size_t *)__pa_nodebug(&container_size);
	cont = (u8 **)__pa_nodebug(&container);
	patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	cont_sz = &container_size;
	cont = &container;
	patch = &amd_ucode_patch;
#endif

	data = ucode;
	left = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return;

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		*cont = data;

		/* Advance past the container header */
		offset = header[2] + CONTAINER_HDR_SZ;
		data += offset;
		left -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			this_equiv_id = eq_id;
			*cont_sz = compute_container_size(*cont, left + offset);

			/*
			 * truncate how much we need to iterate over in the
			 * ucode update loop below
			 */
			left = *cont_sz - offset;
			break;
		}

		/*
		 * support multiple container files appended together. if this
		 * one does not have a matching equivalent cpu entry, we fast
		 * forward to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;
			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data += offset;
			left -= offset;
		}

		/* mark where the next microcode container file starts */
		offset = data - (u8 *)ucode;
		ucode = data;
	}

	if (!eq_id) {
		*cont = NULL;
		*cont_sz = 0;
		return;
	}

	/* find ucode and update if needed */

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)                  /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				if (save_patch)
					memcpy(patch, mc,
					       min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset = header[1] + SECTION_HDR_SIZE;
		data += offset;
		left -= offset;
	}
}

void __init load_ucode_amd_bsp(void)
{
	struct cpio_data cp;
	void **data;
	size_t *size;

#ifdef CONFIG_X86_32
	data = (void **)__pa_nodebug(&ucode_cpio.data);
	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
#else
	data = &ucode_cpio.data;
	size = &ucode_cpio.size;
#endif

	cp = find_ucode_in_initrd();
	if (!cp.data)
		return;

	*data = cp.data;
	*size = cp.size;

	apply_ucode_in_initrd(cp.data, cp.size, true);
}
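
/*
 * For reference: find_equiv_id(), defined in microcode_amd.c and used by
 * apply_ucode_in_initrd() above and the 64-bit load_ucode_amd_ap() below,
 * maps the CPUID(1) signature to the container's equivalence ID, roughly
 * like this (a sketch, not the verbatim upstream body):
 *
 *	u16 find_equiv_id(struct equiv_cpu_entry *table, u32 sig)
 *	{
 *		int i;
 *
 *		for (i = 0; table && table[i].installed_cpu; i++)
 *			if (sig == table[i].installed_cpu)
 *				return table[i].equiv_cpu;
 *
 *		return 0;
 *	}
 *
 * A return value of 0 means "no match", which the callers treat as "no
 * suitable container/patch for this CPU".
 */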

#ifdef CONFIG_X86_32
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
 * which is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;
	size_t *usize;
	void **ucode;

	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	ucode = (void *)__pa_nodebug(&container);
	usize = (size_t *)__pa_nodebug(&container_size);

	if (!*ucode || !*usize)
		return;

	apply_ucode_in_initrd(*ucode, *usize, false);
}

static void __init collect_cpu_sig_on_bsp(void *arg)
{
	unsigned int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->cpu_sig.sig = cpuid_eax(0x00000001);
}

static void __init get_bsp_sig(void)
{
	unsigned int bsp = boot_cpu_data.cpu_index;
	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;

	if (!uci->cpu_sig.sig)
		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
}
#else
void load_ucode_amd_ap(void)
{
	unsigned int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u32 rev, eax;
	u16 eq_id;

	/* Exit if called on the BSP. */
	if (!cpu)
		return;

	if (!container)
		return;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

	uci->cpu_sig.rev = rev;
	uci->cpu_sig.sig = eax;

	eax = cpuid_eax(0x00000001);
	eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {
		if (!ucode_cpio.data)
			return;

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
	}
}
#endif
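
/*
 * A note on the family computation in save_microcode_in_initrd_amd() below:
 * CPUID Fn0000_0001_EAX encodes the base family in bits 11:8 and the
 * extended family in bits 27:20, and the effective family is their sum.
 * E.g., eax = 0x00600f12 yields ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
 * = 0xf + 0x6 = 0x15, i.e. family 0x15.
 */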

int __init save_microcode_in_initrd_amd(void)
{
	unsigned long cont;
	int retval = 0;
	enum ucode_state ret;
	u8 *cont_va;
	u32 eax;

	if (!container)
		return -EINVAL;

#ifdef CONFIG_X86_32
	get_bsp_sig();
	cont = (unsigned long)container;
	cont_va = __va(container);
#else
	/*
	 * We need the physical address of the container for both bitness since
	 * boot_params.hdr.ramdisk_image is a physical address.
	 */
	cont = __pa(container);
	cont_va = container;
#endif

	/*
	 * Take into account the fact that the ramdisk might get relocated and
	 * therefore we need to recompute the container's position in virtual
	 * memory space.
	 */
	if (relocated_ramdisk)
		container = (u8 *)(__va(relocated_ramdisk) +
				   (cont - boot_params.hdr.ramdisk_image));
	else
		container = cont_va;

	if (ucode_new_rev)
		pr_info("microcode: updated early to new patch_level=0x%08x\n",
			ucode_new_rev);

	eax = cpuid_eax(0x00000001);
	eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

	ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * This will be freed any msec now, stash patches for the current
	 * family and switch to patch cache for cpu hotplug, etc later.
	 */
	container = NULL;
	container_size = 0;

	return retval;
}

void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev, eax;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

	mc = (struct microcode_amd *)amd_ucode_patch;

	if (mc && rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("microcode: reload patch_level=0x%08x\n",
				ucode_new_rev);
		}
	}
}
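
/*
 * Note: reload_ucode_amd() reapplies the patch stashed in amd_ucode_patch[]
 * when a core's current patch level is older than the stash, e.g. after the
 * microcode state was lost across a suspend/resume cycle (cf. the comment
 * above the 32-bit load_ucode_amd_ap(): the stashed BSP patch "is used upon
 * resume from suspend"). The vendor dispatch that invokes it lives in the
 * common microcode driver and is not shown here.
 */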