untrusted comment: verify with openbsd-72-base.pub
RWQTKNnK3CZZ8Nu9ll6vyAa7XuKWKaRDgPfRjhN5yLLDlbfalJC3gtASzdexlpzfdMQDfF4eY38acsyc9LX6d1ALRhxasb0qsws=

OpenBSD 7.2 errata 035, July 24, 2023:

Install firmware updates for AMD CPUs, to repair past or potential future bugs.
For i386 and amd64 platforms.

Apply by doing:
    signify -Vep /etc/signify/openbsd-72-base.pub -x 035_amdcpufirmware.patch.sig \
        -m - | (cd /usr/src && patch -p0)

And then rebuild and install a new kernel:
    KK=`sysctl -n kern.osversion | cut -d# -f1`
    cd /usr/src/sys/arch/`machine`/compile/$KK
    make obj
    make config
    make
    make install

New firmware can be downloaded using
    fw_update

New bootblocks will need to be installed, using a command similar to this:
    installboot -v sd0

Index: sys/arch/amd64/amd64/ucode.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/ucode.c,v
diff -u -p -u -r1.5 ucode.c
--- sys/arch/amd64/amd64/ucode.c	28 Jun 2019 21:54:05 -0000	1.5
+++ sys/arch/amd64/amd64/ucode.c	23 Jul 2023 02:44:39 -0000
@@ -72,6 +72,8 @@ size_t	 cpu_ucode_size;
 void	 cpu_ucode_setup(void);
 void	 cpu_ucode_apply(struct cpu_info *);
 
+struct mutex	cpu_ucode_mtx = MUTEX_INITIALIZER(IPL_HIGH);
+
 /* Intel */
 void	 cpu_ucode_intel_apply(struct cpu_info *);
 struct intel_ucode_header *
@@ -82,7 +84,8 @@ int	 cpu_ucode_intel_match(struct intel_
 uint32_t cpu_ucode_intel_rev(void);
 
 struct intel_ucode_header	*cpu_ucode_intel_applied;
-struct mutex			 cpu_ucode_intel_mtx = MUTEX_INITIALIZER(IPL_HIGH);
+
+void cpu_ucode_amd_apply(struct cpu_info *);
 
 void
 cpu_ucode_setup(void)
@@ -107,6 +110,99 @@ cpu_ucode_apply(struct cpu_info *ci)
 {
 	if (strcmp(cpu_vendor, "GenuineIntel") == 0)
 		cpu_ucode_intel_apply(ci);
+	else if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+		cpu_ucode_amd_apply(ci);
+}
+
+#define AMD_MAGIC 0x00414d44
+
+struct amd_equiv {
+	uint32_t id;
+	uint32_t a;
+	uint32_t b;
+	uint16_t eid;
+	uint16_t c;
+} __packed;
+
+struct amd_patch {
+	uint32_t type;
+	uint32_t len;
+	uint32_t a;
+	uint32_t level;
+	uint8_t c[16];
+	uint16_t eid;
+} __packed;
+
+void
+cpu_ucode_amd_apply(struct cpu_info *ci)
+{
+	uint64_t level;
+	uint32_t magic, tlen, i;
+	uint16_t eid = 0;
+	uint32_t sig, ebx, ecx, edx;
+	uint64_t start = 0;
+
+	if (cpu_ucode_data == NULL || cpu_ucode_size == 0) {
+		DPRINTF(("%s: no microcode provided\n", __func__));
+		return;
+	}
+
+	/*
+	 * Grab a mutex, because we are not allowed to run updates
+	 * simultaneously on HT siblings.
+	 */
+	mtx_enter(&cpu_ucode_mtx);
+
+	CPUID(1, sig, ebx, ecx, edx);
+
+	level = rdmsr(MSR_PATCH_LEVEL);
+	DPRINTF(("%s: cur patch level 0x%llx\n", __func__, level));
+
+	memcpy(&magic, cpu_ucode_data, 4);
+	if (magic != AMD_MAGIC) {
+		DPRINTF(("%s: bad magic %x\n", __func__, magic));
+		goto out;
+	}
+
+	memcpy(&tlen, &cpu_ucode_data[8], 4);
+
+	/* find equivalence id matching our cpu signature */
+	for (i = 12; i < 12 + tlen;) {
+		struct amd_equiv ae;
+		if (i + sizeof(ae) > cpu_ucode_size) {
+			DPRINTF(("%s: truncated etable\n", __func__));
+			goto out;
+		}
+		memcpy(&ae, &cpu_ucode_data[i], sizeof(ae));
+		i += sizeof(ae);
+		if (ae.id == sig)
+			eid = ae.eid;
+	}
+
+	/* look for newer patch with the equivalence id */
+	while (i < cpu_ucode_size) {
+		struct amd_patch ap;
+		if (i + sizeof(ap) > cpu_ucode_size) {
+			DPRINTF(("%s: truncated ptable\n", __func__));
+			goto out;
+		}
+		memcpy(&ap, &cpu_ucode_data[i], sizeof(ap));
+		if (ap.type == 1 && ap.eid == eid && ap.level > level)
+			start = (uint64_t)&cpu_ucode_data[i + 8];
+		if (i + ap.len + 8 > cpu_ucode_size) {
+			DPRINTF(("%s: truncated patch\n", __func__));
+			goto out;
+		}
+		i += ap.len + 8;
+	}
+
+	if (start != 0) {
+		wrmsr(MSR_PATCH_LOADER, start);
+		level = rdmsr(MSR_PATCH_LEVEL);
+		DPRINTF(("%s: new patch level 0x%llx\n", __func__, level));
+	}
+out:
+	mtx_leave(&cpu_ucode_mtx);
 }
 
 void
@@ -125,7 +221,7 @@ cpu_ucode_intel_apply(struct cpu_info *c
 	 * Grab a mutex, because we are not allowed to run updates
 	 * simultaneously on HT siblings.
 	 */
-	mtx_enter(&cpu_ucode_intel_mtx);
+	mtx_enter(&cpu_ucode_mtx);
 
 	old_rev = cpu_ucode_intel_rev();
 	update = cpu_ucode_intel_applied;
@@ -159,7 +255,7 @@ cpu_ucode_intel_apply(struct cpu_info *c
 	}
 
 out:
-	mtx_leave(&cpu_ucode_intel_mtx);
+	mtx_leave(&cpu_ucode_mtx);
 }
 
 struct intel_ucode_header *
Index: sys/arch/i386/i386/ucode.c
===================================================================
RCS file: /cvs/src/sys/arch/i386/i386/ucode.c,v
diff -u -p -u -r1.3 ucode.c
--- sys/arch/i386/i386/ucode.c	15 Dec 2020 22:51:34 -0000	1.3
+++ sys/arch/i386/i386/ucode.c	23 Jul 2023 02:56:42 -0000
@@ -72,6 +72,8 @@ size_t	 cpu_ucode_size;
 void	 cpu_ucode_setup(void);
 void	 cpu_ucode_apply(struct cpu_info *);
 
+struct mutex	cpu_ucode_mtx = MUTEX_INITIALIZER(IPL_HIGH);
+
 /* Intel */
 void	 cpu_ucode_intel_apply(struct cpu_info *);
 struct intel_ucode_header *
@@ -82,7 +84,8 @@ int	 cpu_ucode_intel_match(struct intel_
 uint32_t cpu_ucode_intel_rev(void);
 
 struct intel_ucode_header	*cpu_ucode_intel_applied;
-struct mutex			 cpu_ucode_intel_mtx = MUTEX_INITIALIZER(IPL_HIGH);
+
+void cpu_ucode_amd_apply(struct cpu_info *);
 
 void
 cpu_ucode_setup(void)
@@ -130,6 +133,99 @@ cpu_ucode_apply(struct cpu_info *ci)
 {
 	if (strcmp(cpu_vendor, "GenuineIntel") == 0)
 		cpu_ucode_intel_apply(ci);
+	else if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+		cpu_ucode_amd_apply(ci);
+}
+
+#define AMD_MAGIC 0x00414d44
+
+struct amd_equiv {
+	uint32_t id;
+	uint32_t a;
+	uint32_t b;
+	uint16_t eid;
+	uint16_t c;
+} __packed;
+
+struct amd_patch {
+	uint32_t type;
+	uint32_t len;
+	uint32_t a;
+	uint32_t level;
+	uint8_t c[16];
+	uint16_t eid;
+} __packed;
+
+void
+cpu_ucode_amd_apply(struct cpu_info *ci)
+{
+	uint64_t level;
+	uint32_t magic, tlen, i;
+	uint16_t eid = 0;
+	uint32_t sig, ebx, ecx, edx;
+	uint64_t start = 0;
+
+	if (cpu_ucode_data == NULL || cpu_ucode_size == 0) {
+		DPRINTF(("%s: no microcode provided\n", __func__));
+		return;
+	}
+
+	/*
+	 * Grab a mutex, because we are not allowed to run updates
+	 * simultaneously on HT siblings.
+	 */
+	mtx_enter(&cpu_ucode_mtx);
+
+	CPUID(1, sig, ebx, ecx, edx);
+
+	level = rdmsr(MSR_PATCH_LEVEL);
+	DPRINTF(("%s: cur patch level 0x%llx\n", __func__, level));
+
+	memcpy(&magic, cpu_ucode_data, 4);
+	if (magic != AMD_MAGIC) {
+		DPRINTF(("%s: bad magic %x\n", __func__, magic));
+		goto out;
+	}
+
+	memcpy(&tlen, &cpu_ucode_data[8], 4);
+
+	/* find equivalence id matching our cpu signature */
+	for (i = 12; i < 12 + tlen;) {
+		struct amd_equiv ae;
+		if (i + sizeof(ae) > cpu_ucode_size) {
+			DPRINTF(("%s: truncated etable\n", __func__));
+			goto out;
+		}
+		memcpy(&ae, &cpu_ucode_data[i], sizeof(ae));
+		i += sizeof(ae);
+		if (ae.id == sig)
+			eid = ae.eid;
+	}
+
+	/* look for newer patch with the equivalence id */
+	while (i < cpu_ucode_size) {
+		struct amd_patch ap;
+		if (i + sizeof(ap) > cpu_ucode_size) {
+			DPRINTF(("%s: truncated ptable\n", __func__));
+			goto out;
+		}
+		memcpy(&ap, &cpu_ucode_data[i], sizeof(ap));
+		if (ap.type == 1 && ap.eid == eid && ap.level > level)
+			start = (uint64_t)&cpu_ucode_data[i + 8];
+		if (i + ap.len + 8 > cpu_ucode_size) {
+			DPRINTF(("%s: truncated patch\n", __func__));
+			goto out;
+		}
+		i += ap.len + 8;
+	}
+
+	if (start != 0) {
+		wrmsr(MSR_PATCH_LOADER, start);
+		level = rdmsr(MSR_PATCH_LEVEL);
+		DPRINTF(("%s: new patch level 0x%llx\n", __func__, level));
+	}
+out:
+	mtx_leave(&cpu_ucode_mtx);
 }
 
 void
@@ -148,7 +244,7 @@ cpu_ucode_intel_apply(struct cpu_info *c
 	 * Grab a mutex, because we are not allowed to run updates
 	 * simultaneously on HT siblings.
 	 */
-	mtx_enter(&cpu_ucode_intel_mtx);
+	mtx_enter(&cpu_ucode_mtx);
 
 	old_rev = cpu_ucode_intel_rev();
 	update = cpu_ucode_intel_applied;
@@ -182,7 +278,7 @@ cpu_ucode_intel_apply(struct cpu_info *c
 	}
 
 out:
-	mtx_leave(&cpu_ucode_intel_mtx);
+	mtx_leave(&cpu_ucode_mtx);
 }
 
 struct intel_ucode_header *