Diffstat (limited to 'system/xen/xsa/xsa347-4.13-3.patch')
 -rw-r--r--  system/xen/xsa/xsa347-4.13-3.patch   59
 1 file changed, 59 insertions, 0 deletions
diff --git a/system/xen/xsa/xsa347-4.13-3.patch b/system/xen/xsa/xsa347-4.13-3.patch
new file mode 100644
index 0000000000..90c8e66020
--- /dev/null
+++ b/system/xen/xsa/xsa347-4.13-3.patch
@@ -0,0 +1,59 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: AMD/IOMMU: ensure suitable ordering of DTE modifications
+
+DMA and interrupt translation should be enabled only after other
+applicable DTE fields have been written. Similarly when disabling
+translation or when moving a device between domains, translation should
+first be disabled, before other entry fields get modified. Note however
+that the "moving" aspect doesn't apply to the interrupt remapping side,
+as domain specifics are maintained in the IRTEs here, not the DTE. We
+also never disable interrupt remapping once it got enabled for a device
+(the respective argument passed is always the immutable iommu_intremap).
+
+This is part of XSA-347.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Paul Durrant <paul@xen.org>
+
+--- a/xen/drivers/passthrough/amd/iommu_map.c
++++ b/xen/drivers/passthrough/amd/iommu_map.c
+@@ -107,11 +107,18 @@ void amd_iommu_set_root_page_table(struc
+                                    uint64_t root_ptr, uint16_t domain_id,
+                                    uint8_t paging_mode, bool valid)
+ {
++    if ( valid || dte->v )
++    {
++        dte->tv = false;
++        dte->v = true;
++        smp_wmb();
++    }
+     dte->domain_id = domain_id;
+     dte->pt_root = paddr_to_pfn(root_ptr);
+     dte->iw = true;
+     dte->ir = true;
+     dte->paging_mode = paging_mode;
++    smp_wmb();
+     dte->tv = true;
+     dte->v = valid;
+ }
+@@ -134,6 +141,7 @@ void amd_iommu_set_intremap_table(
+     }
+
+     dte->ig = false; /* unmapped interrupts result in i/o page faults */
++    smp_wmb();
+     dte->iv = valid;
+ }
+
+--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
++++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
+@@ -120,7 +120,10 @@ static void amd_iommu_setup_domain_devic
+         /* Undo what amd_iommu_disable_domain_device() may have done. */
+         ivrs_dev = &get_ivrs_mappings(iommu->seg)[req_id];
+         if ( dte->it_root )
++        {
+             dte->int_ctl = IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED;
++            smp_wmb();
++        }
+         dte->iv = iommu_intremap;
+         dte->ex = ivrs_dev->dte_allow_exclusion;
+         dte->sys_mgt = MASK_EXTR(ivrs_dev->device_flags, ACPI_IVHD_SYSTEM_MGMT);
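
The ordering the patch enforces boils down to: quiesce translation (clear tv) on a live DTE before rewriting its routing fields, then issue a write barrier so the new fields are visible before translation is re-enabled. A minimal, self-contained C sketch of that pattern follows; the trimmed-down struct dte and the C11 stand-in for smp_wmb() are assumptions for illustration, not the real Xen definitions.

/*
 * Sketch only: simplified device table entry and a C11 stand-in for the
 * hypervisor's store barrier.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define smp_wmb() atomic_thread_fence(memory_order_release)

struct dte {                    /* hypothetical, heavily trimmed DTE */
    bool v, tv, iw, ir;
    uint16_t domain_id;
    uint64_t pt_root;
    uint8_t paging_mode;
};

void set_root_page_table(struct dte *dte, uint64_t root_pfn,
                         uint16_t domain_id, uint8_t paging_mode, bool valid)
{
    /* Entry is (or is about to become) live: stop address translation first. */
    if ( valid || dte->v )
    {
        dte->tv = false;
        dte->v = true;
        smp_wmb();              /* tv=0 must be visible before fields change */
    }

    dte->domain_id = domain_id; /* rewrite the routing fields */
    dte->pt_root = root_pfn;
    dte->iw = dte->ir = true;
    dte->paging_mode = paging_mode;
    smp_wmb();                  /* publish the new fields ... */

    dte->tv = true;             /* ... then re-enable translation */
    dte->v = valid;
}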