// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD CPU Microcode Update Driver for Linux
 *
 * This driver allows to upgrade microcode on F10h AMD
 * CPUs and later.
 *
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	         2013-2018 Borislav Petkov <bp@alien8.de>
 *
 * Author: Peter Oruba <peter.oruba@amd.com>
 *
 * Based on work by:
 * Tigran Aivazian <aivazian.tigran@gmail.com>
 *
 * early loader:
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 * Fixes: Borislav Petkov <bp@suse.de>
 */
22#define pr_fmt(fmt) "microcode: " fmt
23
24#include <linux/earlycpio.h>
25#include <linux/firmware.h>
26#include <linux/uaccess.h>
27#include <linux/vmalloc.h>
28#include <linux/initrd.h>
29#include <linux/kernel.h>
30#include <linux/pci.h>
31
32#include <asm/microcode.h>
33#include <asm/processor.h>
34#include <asm/setup.h>
35#include <asm/cpu.h>
36#include <asm/msr.h>
37
38#include "internal.h"
39
/* One cached, verified microcode patch. */
struct ucode_patch {
	struct list_head plist;		/* node in microcode_cache */
	void *data;			/* raw patch payload (heap copy) */
	unsigned int size;		/* payload size in bytes */
	u32 patch_id;			/* patch revision */
	u16 equiv_cpu;			/* processor_rev_id / equivalence ID this patch targets */
};

/* Cache of verified patches; maintained by update_cache()/free_cache(). */
static LIST_HEAD(microcode_cache);
49
/* Container file magic: the first u32 of a valid container ("DMA\0" bytes). */
#define UCODE_MAGIC			0x00414d44
/* Section type: CPU equivalence table */
#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
/* Section type: microcode patch */
#define UCODE_UCODE_TYPE		0x00000001

/* Section header: type (u32) + size (u32) */
#define SECTION_HDR_SIZE		8
/* Container header: magic + section type + equivalence table length */
#define CONTAINER_HDR_SZ		12
56
/*
 * One on-disk entry of the CPU equivalence table: maps an installed CPU
 * signature (CPUID Fn0000_0001_EAX) to the equivalence ID used in patch
 * headers.
 */
struct equiv_cpu_entry {
	u32 installed_cpu;		/* CPUID signature this entry matches */
	u32 fixed_errata_mask;
	u32 fixed_errata_compare;
	u16 equiv_cpu;			/* equivalence ID referenced by patches */
	u16 res;			/* reserved */
} __packed;
64
/* On-disk header of a single microcode patch, per AMD's container format. */
struct microcode_header_amd {
	u32 data_code;
	u32 patch_id;			/* patch revision */
	u16 mc_patch_data_id;
	u8 mc_patch_data_len;
	u8 init_flag;
	u32 mc_patch_data_checksum;
	u32 nb_dev_id;			/* non-zero => chipset-specific, rejected by verify_patch() */
	u32 sb_dev_id;			/* non-zero => chipset-specific, rejected by verify_patch() */
	u16 processor_rev_id;		/* equivalence ID this patch applies to */
	u8 nb_rev_id;
	u8 sb_rev_id;
	u8 bios_api_rev;
	u8 reserved1[3];
	u32 match_reg[8];
} __packed;

/* A patch in memory: header followed by the microcode patch block. */
struct microcode_amd {
	struct microcode_header_amd hdr;
	unsigned int mpb[];		/* flexible array of patch words */
};

/* Maximum accepted patch size (not referenced in this chunk). */
#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
88
/* In-memory copy of the container's CPU equivalence table. */
static struct equiv_cpu_table {
	unsigned int num_entries;	/* number of equiv_cpu_entry elements */
	struct equiv_cpu_entry *entry;	/* vmalloc'd copy, see install_equiv_cpu_table() */
} equiv_table;

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
 * microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;	/* matching patch, NULL if none found */
	u32 cpuid_1_eax;		/* CPUID Fn0000_0001_EAX of the CPU to match */
	u32 psize;			/* size of @mc's patch data */
	u8 *data;			/* start of the container holding @mc */
	size_t size;			/* number of bytes of that container scanned */
};

/* Revision applied during early loading; reported by init_amd_microcode(). */
static u32 ucode_new_rev;

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/arch/x86/microcode.rst
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
115
116static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
117{
118 unsigned int i;
119
120 if (!et || !et->num_entries)
121 return 0;
122
123 for (i = 0; i < et->num_entries; i++) {
124 struct equiv_cpu_entry *e = &et->entry[i];
125
126 if (sig == e->installed_cpu)
127 return e->equiv_cpu;
128 }
129 return 0;
130}
131
132/*
133 * Check whether there is a valid microcode container file at the beginning
134 * of @buf of size @buf_size.
135 */
136static bool verify_container(const u8 *buf, size_t buf_size)
137{
138 u32 cont_magic;
139
140 if (buf_size <= CONTAINER_HDR_SZ) {
141 pr_debug("Truncated microcode container header.\n");
142 return false;
143 }
144
145 cont_magic = *(const u32 *)buf;
146 if (cont_magic != UCODE_MAGIC) {
147 pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
148 return false;
149 }
150
151 return true;
152}
153
154/*
155 * Check whether there is a valid, non-truncated CPU equivalence table at the
156 * beginning of @buf of size @buf_size.
157 */
158static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
159{
160 const u32 *hdr = (const u32 *)buf;
161 u32 cont_type, equiv_tbl_len;
162
163 if (!verify_container(buf, buf_size))
164 return false;
165
166 cont_type = hdr[1];
167 if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
168 pr_debug("Wrong microcode container equivalence table type: %u.\n",
169 cont_type);
170 return false;
171 }
172
173 buf_size -= CONTAINER_HDR_SZ;
174
175 equiv_tbl_len = hdr[2];
176 if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
177 buf_size < equiv_tbl_len) {
178 pr_debug("Truncated equivalence table.\n");
179 return false;
180 }
181
182 return true;
183}
184
185/*
186 * Check whether there is a valid, non-truncated microcode patch section at the
187 * beginning of @buf of size @buf_size.
188 *
189 * On success, @sh_psize returns the patch size according to the section header,
190 * to the caller.
191 */
192static bool
193__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
194{
195 u32 p_type, p_size;
196 const u32 *hdr;
197
198 if (buf_size < SECTION_HDR_SIZE) {
199 pr_debug("Truncated patch section.\n");
200 return false;
201 }
202
203 hdr = (const u32 *)buf;
204 p_type = hdr[0];
205 p_size = hdr[1];
206
207 if (p_type != UCODE_UCODE_TYPE) {
208 pr_debug("Invalid type field (0x%x) in container file section header.\n",
209 p_type);
210 return false;
211 }
212
213 if (p_size < sizeof(struct microcode_header_amd)) {
214 pr_debug("Patch of size %u too short.\n", p_size);
215 return false;
216 }
217
218 *sh_psize = p_size;
219
220 return true;
221}
222
223/*
224 * Check whether the passed remaining file @buf_size is large enough to contain
225 * a patch of the indicated @sh_psize (and also whether this size does not
226 * exceed the per-family maximum). @sh_psize is the size read from the section
227 * header.
228 */
229static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
230{
231 u32 max_size;
232
233 if (family >= 0x15)
234 return min_t(u32, sh_psize, buf_size);
235
236#define F1XH_MPB_MAX_SIZE 2048
237#define F14H_MPB_MAX_SIZE 1824
238
239 switch (family) {
240 case 0x10 ... 0x12:
241 max_size = F1XH_MPB_MAX_SIZE;
242 break;
243 case 0x14:
244 max_size = F14H_MPB_MAX_SIZE;
245 break;
246 default:
247 WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
248 return 0;
249 }
250
251 if (sh_psize > min_t(u32, buf_size, max_size))
252 return 0;
253
254 return sh_psize;
255}
256
257/*
258 * Verify the patch in @buf.
259 *
260 * Returns:
261 * negative: on error
262 * positive: patch is not for this family, skip it
263 * 0: success
264 */
265static int
266verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
267{
268 struct microcode_header_amd *mc_hdr;
269 unsigned int ret;
270 u32 sh_psize;
271 u16 proc_id;
272 u8 patch_fam;
273
274 if (!__verify_patch_section(buf, buf_size, sh_psize: &sh_psize))
275 return -1;
276
277 /*
278 * The section header length is not included in this indicated size
279 * but is present in the leftover file length so we need to subtract
280 * it before passing this value to the function below.
281 */
282 buf_size -= SECTION_HDR_SIZE;
283
284 /*
285 * Check if the remaining buffer is big enough to contain a patch of
286 * size sh_psize, as the section claims.
287 */
288 if (buf_size < sh_psize) {
289 pr_debug("Patch of size %u truncated.\n", sh_psize);
290 return -1;
291 }
292
293 ret = __verify_patch_size(family, sh_psize, buf_size);
294 if (!ret) {
295 pr_debug("Per-family patch size mismatch.\n");
296 return -1;
297 }
298
299 *patch_size = sh_psize;
300
301 mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
302 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
303 pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
304 return -1;
305 }
306
307 proc_id = mc_hdr->processor_rev_id;
308 patch_fam = 0xf + (proc_id >> 12);
309 if (patch_fam != family)
310 return 1;
311
312 return 0;
313}
314
315/*
316 * This scans the ucode blob for the proper container as we can have multiple
317 * containers glued together. Returns the equivalence ID from the equivalence
318 * table or 0 if none found.
319 * Returns the amount of bytes consumed while scanning. @desc contains all the
320 * data we're going to use in later stages of the application.
321 */
322static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
323{
324 struct equiv_cpu_table table;
325 size_t orig_size = size;
326 u32 *hdr = (u32 *)ucode;
327 u16 eq_id;
328 u8 *buf;
329
330 if (!verify_equivalence_table(buf: ucode, buf_size: size))
331 return 0;
332
333 buf = ucode;
334
335 table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
336 table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);
337
338 /*
339 * Find the equivalence ID of our CPU in this table. Even if this table
340 * doesn't contain a patch for the CPU, scan through the whole container
341 * so that it can be skipped in case there are other containers appended.
342 */
343 eq_id = find_equiv_id(et: &table, sig: desc->cpuid_1_eax);
344
345 buf += hdr[2] + CONTAINER_HDR_SZ;
346 size -= hdr[2] + CONTAINER_HDR_SZ;
347
348 /*
349 * Scan through the rest of the container to find where it ends. We do
350 * some basic sanity-checking too.
351 */
352 while (size > 0) {
353 struct microcode_amd *mc;
354 u32 patch_size;
355 int ret;
356
357 ret = verify_patch(family: x86_family(sig: desc->cpuid_1_eax), buf, buf_size: size, patch_size: &patch_size);
358 if (ret < 0) {
359 /*
360 * Patch verification failed, skip to the next container, if
361 * there is one. Before exit, check whether that container has
362 * found a patch already. If so, use it.
363 */
364 goto out;
365 } else if (ret > 0) {
366 goto skip;
367 }
368
369 mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
370 if (eq_id == mc->hdr.processor_rev_id) {
371 desc->psize = patch_size;
372 desc->mc = mc;
373 }
374
375skip:
376 /* Skip patch section header too: */
377 buf += patch_size + SECTION_HDR_SIZE;
378 size -= patch_size + SECTION_HDR_SIZE;
379 }
380
381out:
382 /*
383 * If we have found a patch (desc->mc), it means we're looking at the
384 * container which has a patch for this CPU so return 0 to mean, @ucode
385 * already points to the proper container. Otherwise, we return the size
386 * we scanned so that we can advance to the next container in the
387 * buffer.
388 */
389 if (desc->mc) {
390 desc->data = ucode;
391 desc->size = orig_size - size;
392
393 return 0;
394 }
395
396 return orig_size - size;
397}
398
399/*
400 * Scan the ucode blob for the proper container as we can have multiple
401 * containers glued together.
402 */
403static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
404{
405 while (size) {
406 size_t s = parse_container(ucode, size, desc);
407 if (!s)
408 return;
409
410 /* catch wraparound */
411 if (size >= s) {
412 ucode += s;
413 size -= s;
414 } else {
415 return;
416 }
417 }
418}
419
420static int __apply_microcode_amd(struct microcode_amd *mc)
421{
422 u32 rev, dummy;
423
424 native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
425
426 /* verify patch application was successful */
427 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
428 if (rev != mc->hdr.patch_id)
429 return -1;
430
431 return 0;
432}
433
434/*
435 * Early load occurs before we can vmalloc(). So we look for the microcode
436 * patch container file in initrd, traverse equivalent cpu table, look for a
437 * matching microcode patch, and update, all in initrd memory in place.
438 * When vmalloc() is available for use later -- on 64-bit during first AP load,
439 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
440 * load_microcode_amd() to save equivalent cpu table and microcode patches in
441 * kernel heap memory.
442 *
443 * Returns true if container found (sets @desc), false otherwise.
444 */
445static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
446{
447 struct cont_desc desc = { 0 };
448 struct microcode_amd *mc;
449 bool ret = false;
450 u32 rev, dummy;
451
452 desc.cpuid_1_eax = cpuid_1_eax;
453
454 scan_containers(ucode, size, desc: &desc);
455
456 mc = desc.mc;
457 if (!mc)
458 return ret;
459
460 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
461
462 /*
463 * Allow application of the same revision to pick up SMT-specific
464 * changes even if the revision of the other SMT thread is already
465 * up-to-date.
466 */
467 if (rev > mc->hdr.patch_id)
468 return ret;
469
470 if (!__apply_microcode_amd(mc)) {
471 ucode_new_rev = mc->hdr.patch_id;
472 ret = true;
473 }
474
475 return ret;
476}
477
478static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
479{
480 char fw_name[36] = "amd-ucode/microcode_amd.bin";
481 struct firmware fw;
482
483 if (IS_ENABLED(CONFIG_X86_32))
484 return false;
485
486 if (family >= 0x15)
487 snprintf(buf: fw_name, size: sizeof(fw_name),
488 fmt: "amd-ucode/microcode_amd_fam%02hhxh.bin", family);
489
490 if (firmware_request_builtin(fw: &fw, name: fw_name)) {
491 cp->size = fw.size;
492 cp->data = (void *)fw.data;
493 return true;
494 }
495
496 return false;
497}
498
499static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
500{
501 struct cpio_data cp;
502
503 if (!get_builtin_microcode(cp: &cp, family: x86_family(sig: cpuid_1_eax)))
504 cp = find_microcode_in_initrd(path: ucode_path);
505
506 *ret = cp;
507}
508
509void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
510{
511 struct cpio_data cp = { };
512
513 /* Needed in load_microcode_amd() */
514 ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
515
516 find_blobs_in_containers(cpuid_1_eax, ret: &cp);
517 if (!(cp.data && cp.size))
518 return;
519
520 early_apply_microcode(cpuid_1_eax, ucode: cp.data, size: cp.size);
521}
522
523static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
524
525static int __init save_microcode_in_initrd(void)
526{
527 unsigned int cpuid_1_eax = native_cpuid_eax(op: 1);
528 struct cpuinfo_x86 *c = &boot_cpu_data;
529 struct cont_desc desc = { 0 };
530 enum ucode_state ret;
531 struct cpio_data cp;
532
533 if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
534 return 0;
535
536 find_blobs_in_containers(cpuid_1_eax, ret: &cp);
537 if (!(cp.data && cp.size))
538 return -EINVAL;
539
540 desc.cpuid_1_eax = cpuid_1_eax;
541
542 scan_containers(ucode: cp.data, size: cp.size, desc: &desc);
543 if (!desc.mc)
544 return -EINVAL;
545
546 ret = load_microcode_amd(family: x86_family(sig: cpuid_1_eax), data: desc.data, size: desc.size);
547 if (ret > UCODE_UPDATED)
548 return -EINVAL;
549
550 return 0;
551}
552early_initcall(save_microcode_in_initrd);
553
554/*
555 * a small, trivial cache of per-family ucode patches
556 */
557static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
558{
559 struct ucode_patch *p;
560
561 list_for_each_entry(p, &microcode_cache, plist)
562 if (p->equiv_cpu == equiv_cpu)
563 return p;
564 return NULL;
565}
566
567static void update_cache(struct ucode_patch *new_patch)
568{
569 struct ucode_patch *p;
570
571 list_for_each_entry(p, &microcode_cache, plist) {
572 if (p->equiv_cpu == new_patch->equiv_cpu) {
573 if (p->patch_id >= new_patch->patch_id) {
574 /* we already have the latest patch */
575 kfree(objp: new_patch->data);
576 kfree(objp: new_patch);
577 return;
578 }
579
580 list_replace(old: &p->plist, new: &new_patch->plist);
581 kfree(objp: p->data);
582 kfree(objp: p);
583 return;
584 }
585 }
586 /* no patch found, add it */
587 list_add_tail(new: &new_patch->plist, head: &microcode_cache);
588}
589
590static void free_cache(void)
591{
592 struct ucode_patch *p, *tmp;
593
594 list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
595 __list_del(prev: p->plist.prev, next: p->plist.next);
596 kfree(objp: p->data);
597 kfree(objp: p);
598 }
599}
600
601static struct ucode_patch *find_patch(unsigned int cpu)
602{
603 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
604 u16 equiv_id;
605
606 equiv_id = find_equiv_id(et: &equiv_table, sig: uci->cpu_sig.sig);
607 if (!equiv_id)
608 return NULL;
609
610 return cache_find_patch(equiv_cpu: equiv_id);
611}
612
613void reload_ucode_amd(unsigned int cpu)
614{
615 u32 rev, dummy __always_unused;
616 struct microcode_amd *mc;
617 struct ucode_patch *p;
618
619 p = find_patch(cpu);
620 if (!p)
621 return;
622
623 mc = p->data;
624
625 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
626
627 if (rev < mc->hdr.patch_id) {
628 if (!__apply_microcode_amd(mc)) {
629 ucode_new_rev = mc->hdr.patch_id;
630 pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
631 }
632 }
633}
634
635static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
636{
637 struct cpuinfo_x86 *c = &cpu_data(cpu);
638 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
639 struct ucode_patch *p;
640
641 csig->sig = cpuid_eax(op: 0x00000001);
642 csig->rev = c->microcode;
643
644 /*
645 * a patch could have been loaded early, set uci->mc so that
646 * mc_bp_resume() can call apply_microcode()
647 */
648 p = find_patch(cpu);
649 if (p && (p->patch_id == csig->rev))
650 uci->mc = p->data;
651
652 pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
653
654 return 0;
655}
656
657static enum ucode_state apply_microcode_amd(int cpu)
658{
659 struct cpuinfo_x86 *c = &cpu_data(cpu);
660 struct microcode_amd *mc_amd;
661 struct ucode_cpu_info *uci;
662 struct ucode_patch *p;
663 enum ucode_state ret;
664 u32 rev, dummy __always_unused;
665
666 BUG_ON(raw_smp_processor_id() != cpu);
667
668 uci = ucode_cpu_info + cpu;
669
670 p = find_patch(cpu);
671 if (!p)
672 return UCODE_NFOUND;
673
674 mc_amd = p->data;
675 uci->mc = p->data;
676
677 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
678
679 /* need to apply patch? */
680 if (rev > mc_amd->hdr.patch_id) {
681 ret = UCODE_OK;
682 goto out;
683 }
684
685 if (__apply_microcode_amd(mc: mc_amd)) {
686 pr_err("CPU%d: update failed for patch_level=0x%08x\n",
687 cpu, mc_amd->hdr.patch_id);
688 return UCODE_ERROR;
689 }
690
691 rev = mc_amd->hdr.patch_id;
692 ret = UCODE_UPDATED;
693
694 pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
695
696out:
697 uci->cpu_sig.rev = rev;
698 c->microcode = rev;
699
700 /* Update boot_cpu_data's revision too, if we're on the BSP: */
701 if (c->cpu_index == boot_cpu_data.cpu_index)
702 boot_cpu_data.microcode = rev;
703
704 return ret;
705}
706
707void load_ucode_amd_ap(unsigned int cpuid_1_eax)
708{
709 unsigned int cpu = smp_processor_id();
710
711 ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
712 apply_microcode_amd(cpu);
713}
714
715static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
716{
717 u32 equiv_tbl_len;
718 const u32 *hdr;
719
720 if (!verify_equivalence_table(buf, buf_size))
721 return 0;
722
723 hdr = (const u32 *)buf;
724 equiv_tbl_len = hdr[2];
725
726 equiv_table.entry = vmalloc(size: equiv_tbl_len);
727 if (!equiv_table.entry) {
728 pr_err("failed to allocate equivalent CPU table\n");
729 return 0;
730 }
731
732 memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
733 equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);
734
735 /* add header length */
736 return equiv_tbl_len + CONTAINER_HDR_SZ;
737}
738
739static void free_equiv_cpu_table(void)
740{
741 vfree(addr: equiv_table.entry);
742 memset(&equiv_table, 0, sizeof(equiv_table));
743}
744
/* Drop all cached loader state: equivalence table and patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
750
751/*
752 * Return a non-negative value even if some of the checks failed so that
753 * we can skip over the next patch. If we return a negative value, we
754 * signal a grave error like a memory allocation has failed and the
755 * driver cannot continue functioning normally. In such cases, we tear
756 * down everything we've used up so far and exit.
757 */
758static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
759 unsigned int *patch_size)
760{
761 struct microcode_header_amd *mc_hdr;
762 struct ucode_patch *patch;
763 u16 proc_id;
764 int ret;
765
766 ret = verify_patch(family, buf: fw, buf_size: leftover, patch_size);
767 if (ret)
768 return ret;
769
770 patch = kzalloc(size: sizeof(*patch), GFP_KERNEL);
771 if (!patch) {
772 pr_err("Patch allocation failure.\n");
773 return -EINVAL;
774 }
775
776 patch->data = kmemdup(p: fw + SECTION_HDR_SIZE, size: *patch_size, GFP_KERNEL);
777 if (!patch->data) {
778 pr_err("Patch data allocation failure.\n");
779 kfree(objp: patch);
780 return -EINVAL;
781 }
782 patch->size = *patch_size;
783
784 mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
785 proc_id = mc_hdr->processor_rev_id;
786
787 INIT_LIST_HEAD(list: &patch->plist);
788 patch->patch_id = mc_hdr->patch_id;
789 patch->equiv_cpu = proc_id;
790
791 pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
792 __func__, patch->patch_id, proc_id);
793
794 /* ... and add to cache. */
795 update_cache(new_patch: patch);
796
797 return 0;
798}
799
800/* Scan the blob in @data and add microcode patches to the cache. */
801static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
802 size_t size)
803{
804 u8 *fw = (u8 *)data;
805 size_t offset;
806
807 offset = install_equiv_cpu_table(buf: data, buf_size: size);
808 if (!offset)
809 return UCODE_ERROR;
810
811 fw += offset;
812 size -= offset;
813
814 if (*(u32 *)fw != UCODE_UCODE_TYPE) {
815 pr_err("invalid type field in container file section header\n");
816 free_equiv_cpu_table();
817 return UCODE_ERROR;
818 }
819
820 while (size > 0) {
821 unsigned int crnt_size = 0;
822 int ret;
823
824 ret = verify_and_add_patch(family, fw, leftover: size, patch_size: &crnt_size);
825 if (ret < 0)
826 return UCODE_ERROR;
827
828 fw += crnt_size + SECTION_HDR_SIZE;
829 size -= (crnt_size + SECTION_HDR_SIZE);
830 }
831
832 return UCODE_OK;
833}
834
835static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
836{
837 struct cpuinfo_x86 *c;
838 unsigned int nid, cpu;
839 struct ucode_patch *p;
840 enum ucode_state ret;
841
842 /* free old equiv table */
843 free_equiv_cpu_table();
844
845 ret = __load_microcode_amd(family, data, size);
846 if (ret != UCODE_OK) {
847 cleanup();
848 return ret;
849 }
850
851 for_each_node(nid) {
852 cpu = cpumask_first(srcp: cpumask_of_node(node: nid));
853 c = &cpu_data(cpu);
854
855 p = find_patch(cpu);
856 if (!p)
857 continue;
858
859 if (c->microcode >= p->patch_id)
860 continue;
861
862 ret = UCODE_NEW;
863 }
864
865 return ret;
866}
867
868/*
869 * AMD microcode firmware naming convention, up to family 15h they are in
870 * the legacy file:
871 *
872 * amd-ucode/microcode_amd.bin
873 *
874 * This legacy file is always smaller than 2K in size.
875 *
876 * Beginning with family 15h, they are in family-specific firmware files:
877 *
878 * amd-ucode/microcode_amd_fam15h.bin
879 * amd-ucode/microcode_amd_fam16h.bin
880 * ...
881 *
882 * These might be larger than 2K.
883 */
884static enum ucode_state request_microcode_amd(int cpu, struct device *device)
885{
886 char fw_name[36] = "amd-ucode/microcode_amd.bin";
887 struct cpuinfo_x86 *c = &cpu_data(cpu);
888 enum ucode_state ret = UCODE_NFOUND;
889 const struct firmware *fw;
890
891 if (force_minrev)
892 return UCODE_NFOUND;
893
894 if (c->x86 >= 0x15)
895 snprintf(buf: fw_name, size: sizeof(fw_name), fmt: "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
896
897 if (request_firmware_direct(fw: &fw, name: (const char *)fw_name, device)) {
898 pr_debug("failed to load file %s\n", fw_name);
899 goto out;
900 }
901
902 ret = UCODE_ERROR;
903 if (!verify_container(buf: fw->data, buf_size: fw->size))
904 goto fw_release;
905
906 ret = load_microcode_amd(family: c->x86, data: fw->data, size: fw->size);
907
908 fw_release:
909 release_firmware(fw);
910
911 out:
912 return ret;
913}
914
/* Forget the per-cpu pointer to the cached patch. */
static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}
921
/* Loader-core callbacks for the AMD driver. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_fw = request_microcode_amd,
	.collect_cpu_info = collect_cpu_info_amd,
	.apply_microcode = apply_microcode_amd,
	.microcode_fini_cpu = microcode_fini_cpu_amd,
	.nmi_safe = true,
};
929
930struct microcode_ops * __init init_amd_microcode(void)
931{
932 struct cpuinfo_x86 *c = &boot_cpu_data;
933
934 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
935 pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
936 return NULL;
937 }
938
939 if (ucode_new_rev)
940 pr_info_once("microcode updated early to new patch_level=0x%08x\n",
941 ucode_new_rev);
942
943 return &microcode_amd_ops;
944}
945
/* Module teardown: free the equivalence table and patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}
950

source code of linux/arch/x86/kernel/cpu/microcode/amd.c