author	Mario Preksavec <mario@slackware.hr>	2020-07-18 00:23:20 +0200
committer	Willy Sudiarto Raharjo <willysr@slackbuilds.org>	2020-08-29 10:08:53 +0700
commit	2344addba8f5f78354c7c6f0abe56c03356b6885 (patch)
tree	64ea0739d52fc05ee9e441cbd322db89c35c21a4 /system/xen/xsa
parent	28040a86f75de6bb1f358fdac98ee8497866286e (diff)
system/xen: XSA 317-328 update.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Signed-off-by: Willy Sudiarto Raharjo <willysr@slackbuilds.org>
Diffstat (limited to 'system/xen/xsa')
-rw-r--r--  system/xen/xsa/xsa317.patch                      |  50
-rw-r--r--  system/xen/xsa/xsa319.patch                      |  27
-rw-r--r--  system/xen/xsa/xsa320-4.13-1.patch               | 117
-rw-r--r--  system/xen/xsa/xsa320-4.13-2.patch               | 179
-rw-r--r--  system/xen/xsa/xsa320-4.13-3.patch               |  36
-rw-r--r--  system/xen/xsa/xsa327.patch                      |  63
-rw-r--r--  system/xen/xsa/xsa328-4.13-1.patch               | 118
-rw-r--r--  system/xen/xsa/xsa328-4.13-2.patch               |  48
-rw-r--r--  system/xen/xsa/xsa328-post-xsa321-4.13-1.patch   |  31
-rw-r--r--  system/xen/xsa/xsa328-post-xsa321-4.13-2.patch   | 175
-rw-r--r--  system/xen/xsa/xsa328-post-xsa321-4.13-3.patch   |  82
-rw-r--r--  system/xen/xsa/xsa328-post-xsa321-4.13-4.patch   |  36
-rw-r--r--  system/xen/xsa/xsa328-post-xsa321-4.13-5.patch   |  24
-rw-r--r--  system/xen/xsa/xsa328-post-xsa321-4.13-6.patch   |  91
-rw-r--r--  system/xen/xsa/xsa328-post-xsa321-4.13-7.patch   | 153
15 files changed, 1230 insertions, 0 deletions
diff --git a/system/xen/xsa/xsa317.patch b/system/xen/xsa/xsa317.patch
new file mode 100644
index 0000000000..20e2c643d0
--- /dev/null
+++ b/system/xen/xsa/xsa317.patch
@@ -0,0 +1,50 @@
+From aeb46e92f915f19a61d5a8a1f4b696793f64e6fb Mon Sep 17 00:00:00 2001
+From: Julien Grall <jgrall@amazon.com>
+Date: Thu, 19 Mar 2020 13:17:31 +0000
+Subject: [PATCH] xen/common: event_channel: Don't ignore error in
+ get_free_port()
+
+Currently, get_free_port() assumes that the port has been allocated
+when evtchn_allocate_port() does not return -EBUSY.
+
+However, the function may return an error when:
+ - We exhausted all the event channels. This can happen if the limit
+ configured by the administrator for the guest ('max_event_channels'
+ in xl cfg) is higher than the ABI used by the guest. For instance,
+ if the guest is using 2L, the limit should not be higher than 4095.
+ - We cannot allocate memory (e.g. Xen has no more memory).
+
+Users of get_free_port() (such as EVTCHNOP_alloc_unbound) will validly
+assume the port is valid and will next call evtchn_from_port(). This
+will result in a crash as the memory backing the event channel structure
+is not present.
+
+Fixes: 368ae9a05fe ("xen/pvshim: forward evtchn ops between L0 Xen and L2 DomU")
+Signed-off-by: Julien Grall <jgrall@amazon.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+ xen/common/event_channel.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
+index e86e2bfab0..a8d182b584 100644
+--- a/xen/common/event_channel.c
++++ b/xen/common/event_channel.c
+@@ -195,10 +195,10 @@ static int get_free_port(struct domain *d)
+ {
+ int rc = evtchn_allocate_port(d, port);
+
+- if ( rc == -EBUSY )
+- continue;
+-
+- return port;
++ if ( rc == 0 )
++ return port;
++ else if ( rc != -EBUSY )
++ return rc;
+ }
+
+ return -ENOSPC;
+--
+2.17.1
+
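
The corrected loop can be read in isolation as follows. This is a minimal
standalone sketch, not code from the patch; allocate_port() is a hypothetical
stand-in for evtchn_allocate_port(). The point of the fix is that only -EBUSY
means "try the next port"; any other error must be propagated rather than
mistaken for a valid port number:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-in for evtchn_allocate_port(): pretend the
     * first three ports are already in use. */
    static int allocate_port(int port)
    {
        return port < 3 ? -EBUSY : 0;
    }

    static int get_free_port_sketch(int max_ports)
    {
        for ( int port = 0; port < max_ports; port++ )
        {
            int rc = allocate_port(port);

            if ( rc == 0 )
                return port;        /* success: the port is usable */
            else if ( rc != -EBUSY )
                return rc;          /* e.g. -ENOMEM: propagate the error */
            /* -EBUSY: the port is taken, try the next one */
        }

        return -ENOSPC;             /* all ports exhausted */
    }

    int main(void)
    {
        printf("first free port: %d\n", get_free_port_sketch(8));  /* 3 */
        return 0;
    }
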
diff --git a/system/xen/xsa/xsa319.patch b/system/xen/xsa/xsa319.patch
new file mode 100644
index 0000000000..769443c900
--- /dev/null
+++ b/system/xen/xsa/xsa319.patch
@@ -0,0 +1,27 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/shadow: correct an inverted conditional in dirty VRAM tracking
+
+This originally was "mfn_x(mfn) == INVALID_MFN". Make it like this
+again, taking the opportunity to also drop the unnecessary nearby
+braces.
+
+This is XSA-319.
+
+Fixes: 246a5a3377c2 ("xen: Use a typesafe to define INVALID_MFN")
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -3252,10 +3252,8 @@ int shadow_track_dirty_vram(struct domai
+ int dirty = 0;
+ paddr_t sl1ma = dirty_vram->sl1ma[i];
+
+- if ( !mfn_eq(mfn, INVALID_MFN) )
+- {
++ if ( mfn_eq(mfn, INVALID_MFN) )
+ dirty = 1;
+- }
+ else
+ {
+ page = mfn_to_page(mfn);
diff --git a/system/xen/xsa/xsa320-4.13-1.patch b/system/xen/xsa/xsa320-4.13-1.patch
new file mode 100644
index 0000000000..09eb8ea98e
--- /dev/null
+++ b/system/xen/xsa/xsa320-4.13-1.patch
@@ -0,0 +1,117 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+
+This is part of XSA-320 / CVE-2020-0543
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Wei Liu <wl@xen.org>
+
+diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
+index 1d9d816622..9268454297 100644
+--- a/docs/misc/xen-command-line.pandoc
++++ b/docs/misc/xen-command-line.pandoc
+@@ -483,10 +483,10 @@ accounting for hardware capabilities as enumerated via CPUID.
+
+ Currently accepted:
+
+-The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, `ibpb`,
+-`l1d-flush` and `ssbd` are used by default if available and applicable. They can
+-be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
+-won't offer them to guests.
++The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
++`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
++applicable. They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
++use them itself, and won't offer them to guests.
+
+ ### cpuid_mask_cpu
+ > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
+diff --git a/tools/libxl/libxl_cpuid.c b/tools/libxl/libxl_cpuid.c
+index 6cea4227ba..a78f08b927 100644
+--- a/tools/libxl/libxl_cpuid.c
++++ b/tools/libxl/libxl_cpuid.c
+@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid_policy_list *cpuid, const char* str)
+
+ {"avx512-4vnniw",0x00000007, 0, CPUID_REG_EDX, 2, 1},
+ {"avx512-4fmaps",0x00000007, 0, CPUID_REG_EDX, 3, 1},
++ {"srbds-ctrl", 0x00000007, 0, CPUID_REG_EDX, 9, 1},
+ {"md-clear", 0x00000007, 0, CPUID_REG_EDX, 10, 1},
+ {"cet-ibt", 0x00000007, 0, CPUID_REG_EDX, 20, 1},
+ {"ibrsb", 0x00000007, 0, CPUID_REG_EDX, 26, 1},
+diff --git a/tools/misc/xen-cpuid.c b/tools/misc/xen-cpuid.c
+index 603e1d65fd..a09440813b 100644
+--- a/tools/misc/xen-cpuid.c
++++ b/tools/misc/xen-cpuid.c
+@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
+ [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
+ [ 4] = "fsrm",
+
++ /* 8 */ [ 9] = "srbds-ctrl",
+ [10] = "md-clear",
+ /* 12 */ [13] = "tsx-force-abort",
+
+diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
+index 4b12103482..0cded3c0ad 100644
+--- a/xen/arch/x86/msr.c
++++ b/xen/arch/x86/msr.c
+@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
+ /* Write-only */
+ case MSR_TSX_FORCE_ABORT:
+ case MSR_TSX_CTRL:
++ case MSR_MCU_OPT_CTRL:
+ case MSR_U_CET:
+ case MSR_S_CET:
+ case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
+@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+ /* Read-only */
+ case MSR_TSX_FORCE_ABORT:
+ case MSR_TSX_CTRL:
++ case MSR_MCU_OPT_CTRL:
+ case MSR_U_CET:
+ case MSR_S_CET:
+ case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index 6656c44aec..5fc1c6827e 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -312,12 +312,13 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ printk("Speculative mitigation facilities:\n");
+
+ /* Hardware features which pertain to speculative mitigations. */
+- printk(" Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
++ printk(" Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
+ (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP" : "",
+ (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
+ (_7d0 & cpufeat_mask(X86_FEATURE_SSBD)) ? " SSBD" : "",
+ (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
++ (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
+ (e8b & cpufeat_mask(X86_FEATURE_IBPB)) ? " IBPB" : "",
+ (caps & ARCH_CAPS_IBRS_ALL) ? " IBRS_ALL" : "",
+ (caps & ARCH_CAPS_RDCL_NO) ? " RDCL_NO" : "",
+diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
+index 7693c4a71a..91994669e1 100644
+--- a/xen/include/asm-x86/msr-index.h
++++ b/xen/include/asm-x86/msr-index.h
+@@ -179,6 +179,9 @@
+ #define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x490
+ #define MSR_IA32_VMX_VMFUNC 0x491
+
++#define MSR_MCU_OPT_CTRL 0x00000123
++#define MCU_OPT_CTRL_RNGDS_MITG_DIS (_AC(1, ULL) << 0)
++
+ #define MSR_U_CET 0x000006a0
+ #define MSR_S_CET 0x000006a2
+ #define MSR_PL0_SSP 0x000006a4
+diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
+index 2835688f1c..a2482c3627 100644
+--- a/xen/include/public/arch-x86/cpufeatureset.h
++++ b/xen/include/public/arch-x86/cpufeatureset.h
+@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB, 8*32+12) /*A IBPB support only (no IBRS, used by
+ /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
+ XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A AVX512 Neural Network Instructions */
+ XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A AVX512 Multiply Accumulation Single Precision */
++XEN_CPUFEATURE(SRBDS_CTRL, 9*32+ 9) /* MSR_MCU_OPT_CTRL and RNGDS_MITG_DIS. */
+ XEN_CPUFEATURE(MD_CLEAR, 9*32+10) /*A VERW clears microarchitectural buffers */
+ XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
+ XEN_CPUFEATURE(CET_IBT, 9*32+20) /* CET - Indirect Branch Tracking */
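
As a userspace illustration (a sketch, not part of the patch), the newly
exposed bit can be probed with the cpuid.h helper shipped by GCC and Clang;
the patch enumerates SRBDS_CTRL as CPUID leaf 7, subleaf 0, EDX bit 9:

    #include <stdio.h>
    #include <cpuid.h>  /* GCC/Clang helper header, x86 only */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID.(EAX=7,ECX=0):EDX[9] is SRBDS_CTRL: the CPU exposes
         * MSR_MCU_OPT_CTRL with the RNGDS_MITG_DIS control bit. */
        if ( __get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) )
            printf("srbds-ctrl: %s\n", (edx & (1u << 9)) ? "yes" : "no");

        return 0;
    }
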
diff --git a/system/xen/xsa/xsa320-4.13-2.patch b/system/xen/xsa/xsa320-4.13-2.patch
new file mode 100644
index 0000000000..8a8080a312
--- /dev/null
+++ b/system/xen/xsa/xsa320-4.13-2.patch
@@ -0,0 +1,179 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: Mitigate the Special Register Buffer Data Sampling sidechannel
+
+See patch documentation and comments.
+
+This is part of XSA-320 / CVE-2020-0543
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
+index 9268454297..c780312531 100644
+--- a/docs/misc/xen-command-line.pandoc
++++ b/docs/misc/xen-command-line.pandoc
+@@ -1991,7 +1991,7 @@ By default SSBD will be mitigated at runtime (i.e `ssbd=runtime`).
+ ### spec-ctrl (x86)
+ > `= List of [ <bool>, xen=<bool>, {pv,hvm,msr-sc,rsb,md-clear}=<bool>,
+ > bti-thunk=retpoline|lfence|jmp, {ibrs,ibpb,ssbd,eager-fpu,
+-> l1d-flush,branch-harden}=<bool> ]`
++> l1d-flush,branch-harden,srb-lock}=<bool> ]`
+
+ Controls for speculative execution sidechannel mitigations. By default, Xen
+ will pick the most appropriate mitigations based on compiled in support,
+@@ -2068,6 +2068,12 @@ If Xen is compiled with `CONFIG_SPECULATIVE_HARDEN_BRANCH`, the
+ speculation barriers to protect selected conditional branches. By default,
+ Xen will enable this mitigation.
+
++On hardware supporting SRBDS_CTRL, the `srb-lock=` option can be used to force
++or prevent Xen from protecting the Special Register Buffer from leaking stale
++data. By default, Xen will enable this mitigation, except on parts where MDS
++is fixed and TAA is fixed/mitigated (in which case, there is believed to be no
++way for an attacker to obtain the stale data).
++
+ ### sync_console
+ > `= <boolean>`
+
+diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
+index feb0f6ce20..75c6e34164 100644
+--- a/xen/arch/x86/acpi/power.c
++++ b/xen/arch/x86/acpi/power.c
+@@ -295,6 +295,9 @@ static int enter_state(u32 state)
+ ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
+ spec_ctrl_exit_idle(ci);
+
++ if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
++ wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
++
+ done:
+ spin_debug_enable();
+ local_irq_restore(flags);
+diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
+index dc8fdac1a1..b1e51b3aff 100644
+--- a/xen/arch/x86/smpboot.c
++++ b/xen/arch/x86/smpboot.c
+@@ -361,12 +361,14 @@ void start_secondary(void *unused)
+ microcode_update_one(false);
+
+ /*
+- * If MSR_SPEC_CTRL is available, apply Xen's default setting and discard
+- * any firmware settings. Note: MSR_SPEC_CTRL may only become available
+- * after loading microcode.
++ * If any speculative control MSRs are available, apply Xen's default
++ * settings. Note: These MSRs may only become available after loading
++ * microcode.
+ */
+ if ( boot_cpu_has(X86_FEATURE_IBRSB) )
+ wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
++ if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
++ wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
+
+ tsx_init(); /* Needs microcode. May change HLE/RTM feature bits. */
+
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index 5fc1c6827e..33343062a7 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -65,6 +65,9 @@ static unsigned int __initdata l1d_maxphysaddr;
+ static bool __initdata cpu_has_bug_msbds_only; /* => minimal HT impact. */
+ static bool __initdata cpu_has_bug_mds; /* Any other M{LP,SB,FB}DS combination. */
+
++static int8_t __initdata opt_srb_lock = -1;
++uint64_t __read_mostly default_xen_mcu_opt_ctrl;
++
+ static int __init parse_spec_ctrl(const char *s)
+ {
+ const char *ss;
+@@ -112,6 +115,7 @@ static int __init parse_spec_ctrl(const char *s)
+ opt_ssbd = false;
+ opt_l1d_flush = 0;
+ opt_branch_harden = false;
++ opt_srb_lock = 0;
+ }
+ else if ( val > 0 )
+ rc = -EINVAL;
+@@ -178,6 +182,8 @@ static int __init parse_spec_ctrl(const char *s)
+ opt_l1d_flush = val;
+ else if ( (val = parse_boolean("branch-harden", s, ss)) >= 0 )
+ opt_branch_harden = val;
++ else if ( (val = parse_boolean("srb-lock", s, ss)) >= 0 )
++ opt_srb_lock = val;
+ else
+ rc = -EINVAL;
+
+@@ -341,7 +347,7 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ "\n");
+
+ /* Settings for Xen's protection, irrespective of guests. */
+- printk(" Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s%s, Other:%s%s%s%s\n",
++ printk(" Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s%s, Other:%s%s%s%s%s\n",
+ thunk == THUNK_NONE ? "N/A" :
+ thunk == THUNK_RETPOLINE ? "RETPOLINE" :
+ thunk == THUNK_LFENCE ? "LFENCE" :
+@@ -352,6 +358,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ (default_xen_spec_ctrl & SPEC_CTRL_SSBD) ? " SSBD+" : " SSBD-",
+ !(caps & ARCH_CAPS_TSX_CTRL) ? "" :
+ (opt_tsx & 1) ? " TSX+" : " TSX-",
++ !boot_cpu_has(X86_FEATURE_SRBDS_CTRL) ? "" :
++ opt_srb_lock ? " SRB_LOCK+" : " SRB_LOCK-",
+ opt_ibpb ? " IBPB" : "",
+ opt_l1d_flush ? " L1D_FLUSH" : "",
+ opt_md_clear_pv || opt_md_clear_hvm ? " VERW" : "",
+@@ -1149,6 +1157,34 @@ void __init init_speculation_mitigations(void)
+ tsx_init();
+ }
+
++ /* Calculate suitable defaults for MSR_MCU_OPT_CTRL */
++ if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
++ {
++ uint64_t val;
++
++ rdmsrl(MSR_MCU_OPT_CTRL, val);
++
++ /*
++ * On some SRBDS-affected hardware, it may be safe to relax srb-lock
++ * by default.
++ *
++ * On parts which enumerate MDS_NO and not TAA_NO, TSX is the only way
++ * to access the Fill Buffer. If TSX isn't available (inc. SKU
++ * reasons on some models), or TSX is explicitly disabled, then there
++ * is no need for the extra overhead to protect RDRAND/RDSEED.
++ */
++ if ( opt_srb_lock == -1 &&
++ (caps & (ARCH_CAPS_MDS_NO|ARCH_CAPS_TAA_NO)) == ARCH_CAPS_MDS_NO &&
++ (!cpu_has_hle || ((caps & ARCH_CAPS_TSX_CTRL) && opt_tsx == 0)) )
++ opt_srb_lock = 0;
++
++ val &= ~MCU_OPT_CTRL_RNGDS_MITG_DIS;
++ if ( !opt_srb_lock )
++ val |= MCU_OPT_CTRL_RNGDS_MITG_DIS;
++
++ default_xen_mcu_opt_ctrl = val;
++ }
++
+ print_details(thunk, caps);
+
+ /*
+@@ -1180,6 +1216,9 @@ void __init init_speculation_mitigations(void)
+
+ wrmsrl(MSR_SPEC_CTRL, bsp_delay_spec_ctrl ? 0 : default_xen_spec_ctrl);
+ }
++
++ if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
++ wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
+ }
+
+ static void __init __maybe_unused build_assertions(void)
+diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
+index 9caecddfec..b252bb8631 100644
+--- a/xen/include/asm-x86/spec_ctrl.h
++++ b/xen/include/asm-x86/spec_ctrl.h
+@@ -54,6 +54,8 @@ extern int8_t opt_pv_l1tf_hwdom, opt_pv_l1tf_domu;
+ */
+ extern paddr_t l1tf_addr_mask, l1tf_safe_maddr;
+
++extern uint64_t default_xen_mcu_opt_ctrl;
++
+ static inline void init_shadow_spec_ctrl_state(void)
+ {
+ struct cpu_info *info = get_cpu_info();
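
Distilled from the spec_ctrl.c hunk above (a sketch for readability, not
additional patch code), the default MSR value is computed as follows: with
srb-lock enabled, RNGDS_MITG_DIS stays clear and the Special Register Buffer
is locked while RDRAND/RDSEED execute; with srb-lock disabled, the bit is set
to skip that overhead:

    #include <stdint.h>

    #define MCU_OPT_CTRL_RNGDS_MITG_DIS (1ULL << 0)

    static uint64_t mcu_opt_ctrl_default(uint64_t current, int opt_srb_lock)
    {
        uint64_t val = current & ~MCU_OPT_CTRL_RNGDS_MITG_DIS;

        if ( !opt_srb_lock )                     /* mitigation relaxed */
            val |= MCU_OPT_CTRL_RNGDS_MITG_DIS;

        return val;                              /* written to MSR 0x123 */
    }
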
diff --git a/system/xen/xsa/xsa320-4.13-3.patch b/system/xen/xsa/xsa320-4.13-3.patch
new file mode 100644
index 0000000000..b52eeb338a
--- /dev/null
+++ b/system/xen/xsa/xsa320-4.13-3.patch
@@ -0,0 +1,36 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: Update docs with SRBDS workaround
+
+RDRAND/RDSEED can be hidden using cpuid= to mitigate SRBDS if microcode
+isn't available.
+
+This is part of XSA-320 / CVE-2020-0543.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Julien Grall <jgrall@amazon.com>
+
+diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
+index c780312531..81e12d053c 100644
+--- a/docs/misc/xen-command-line.pandoc
++++ b/docs/misc/xen-command-line.pandoc
+@@ -481,12 +481,18 @@ choice of `dom0-kernel` is deprecated and not supported by all Dom0 kernels.
+ This option allows for fine tuning of the facilities Xen will use, after
+ accounting for hardware capabilities as enumerated via CPUID.
+
++Unless otherwise noted, options only have any effect in their negative form,
++to hide the named feature(s). Ignoring a feature using this mechanism will
++cause Xen not to use the feature, nor offer it as usable to guests.
++
+ Currently accepted:
+
+ The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
+ `stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
+-applicable. They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
+-use them itself, and won't offer them to guests.
++applicable. They can all be ignored.
++
++`rdrand` and `rdseed` can be ignored, as a mitigation to XSA-320 /
++CVE-2020-0543.
+
+ ### cpuid_mask_cpu
+ > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
diff --git a/system/xen/xsa/xsa327.patch b/system/xen/xsa/xsa327.patch
new file mode 100644
index 0000000000..0541cfa0df
--- /dev/null
+++ b/system/xen/xsa/xsa327.patch
@@ -0,0 +1,63 @@
+From 030300ebbb86c40c12db038714479d746167c767 Mon Sep 17 00:00:00 2001
+From: Julien Grall <jgrall@amazon.com>
+Date: Tue, 26 May 2020 18:31:33 +0100
+Subject: [PATCH] xen: Check the alignment of the offset passed via
+ VCPUOP_register_vcpu_info
+
+Currently a guest is able to register any guest physical address to use
+for the vcpu_info structure as long as the structure fits in the
+rest of the frame.
+
+This means a guest can provide an address that is not aligned to the
+natural alignment of the structure.
+
+On Arm 32-bit, unaligned accesses are completely forbidden by the
+hypervisor. This will result in a data abort, which is fatal.
+
+On Arm 64-bit, unaligned accesses are only forbidden when used for atomic
+access. As the structure contains fields (such as evtchn_pending_sel)
+that are updated using atomic operations, any unaligned access will be
+fatal as well.
+
+While the misalignment is only fatal on Arm, a generic check is added
+as an x86 guest shouldn't sensibly pass an unaligned address (this
+would result in a split lock).
+
+This is XSA-327.
+
+Reported-by: Julien Grall <jgrall@amazon.com>
+Signed-off-by: Julien Grall <jgrall@amazon.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+---
+ xen/common/domain.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/xen/common/domain.c b/xen/common/domain.c
+index 7cc9526139a6..e9be05f1d05f 100644
+--- a/xen/common/domain.c
++++ b/xen/common/domain.c
+@@ -1227,10 +1227,20 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
+ void *mapping;
+ vcpu_info_t *new_info;
+ struct page_info *page;
++ unsigned int align;
+
+ if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
+ return -EINVAL;
+
++#ifdef CONFIG_COMPAT
++ if ( has_32bit_shinfo(d) )
++ align = alignof(new_info->compat);
++ else
++#endif
++ align = alignof(*new_info);
++ if ( offset & (align - 1) )
++ return -EINVAL;
++
+ if ( !mfn_eq(v->vcpu_info_mfn, INVALID_MFN) )
+ return -EINVAL;
+
+--
+2.17.1
+
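
The added check is the standard power-of-two alignment idiom. A tiny
standalone illustration with a hypothetical structure (not from the patch):

    #include <assert.h>
    #include <stdalign.h>

    struct info_like { unsigned long pending_sel; };  /* hypothetical */

    /* Valid only for power-of-two alignments, which alignof() guarantees. */
    static int is_aligned(unsigned long offset, unsigned long align)
    {
        return (offset & (align - 1)) == 0;
    }

    int main(void)
    {
        assert(is_aligned(64, alignof(struct info_like)));  /* accepted */
        assert(!is_aligned(3, alignof(struct info_like)));  /* rejected: -EINVAL */
        return 0;
    }
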
diff --git a/system/xen/xsa/xsa328-4.13-1.patch b/system/xen/xsa/xsa328-4.13-1.patch
new file mode 100644
index 0000000000..56e48de3e9
--- /dev/null
+++ b/system/xen/xsa/xsa328-4.13-1.patch
@@ -0,0 +1,118 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/EPT: ept_set_middle_entry() related adjustments
+
+ept_split_super_page() wants to further modify the newly allocated
+table, so have ept_set_middle_entry() return the mapped pointer rather
+than tearing it down and then getting re-established right again.
+
+Similarly ept_next_level() wants to hand back a mapped pointer of
+the next level page, so re-use the one established by
+ept_set_middle_entry() in case that path was taken.
+
+Pull the setting of suppress_ve ahead of insertion into the higher level
+table, and don't have ept_split_super_page() set the field a 2nd time.
+
+This is part of XSA-328.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/mm/p2m-ept.c
++++ b/xen/arch/x86/mm/p2m-ept.c
+@@ -187,8 +187,9 @@ static void ept_p2m_type_to_flags(struct
+ #define GUEST_TABLE_SUPER_PAGE 2
+ #define GUEST_TABLE_POD_PAGE 3
+
+-/* Fill in middle levels of ept table */
+-static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
++/* Fill in middle level of ept table; return pointer to mapped new table. */
++static ept_entry_t *ept_set_middle_entry(struct p2m_domain *p2m,
++ ept_entry_t *ept_entry)
+ {
+ mfn_t mfn;
+ ept_entry_t *table;
+@@ -196,7 +197,12 @@ static int ept_set_middle_entry(struct p
+
+ mfn = p2m_alloc_ptp(p2m, 0);
+ if ( mfn_eq(mfn, INVALID_MFN) )
+- return 0;
++ return NULL;
++
++ table = map_domain_page(mfn);
++
++ for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
++ table[i].suppress_ve = 1;
+
+ ept_entry->epte = 0;
+ ept_entry->mfn = mfn_x(mfn);
+@@ -208,14 +214,7 @@ static int ept_set_middle_entry(struct p
+
+ ept_entry->suppress_ve = 1;
+
+- table = map_domain_page(mfn);
+-
+- for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
+- table[i].suppress_ve = 1;
+-
+- unmap_domain_page(table);
+-
+- return 1;
++ return table;
+ }
+
+ /* free ept sub tree behind an entry */
+@@ -253,10 +252,10 @@ static bool_t ept_split_super_page(struc
+
+ ASSERT(is_epte_superpage(ept_entry));
+
+- if ( !ept_set_middle_entry(p2m, &new_ept) )
++ table = ept_set_middle_entry(p2m, &new_ept);
++ if ( !table )
+ return 0;
+
+- table = map_domain_page(_mfn(new_ept.mfn));
+ trunk = 1UL << ((level - 1) * EPT_TABLE_ORDER);
+
+ for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
+@@ -267,7 +266,6 @@ static bool_t ept_split_super_page(struc
+ epte->sp = (level > 1);
+ epte->mfn += i * trunk;
+ epte->snp = is_iommu_enabled(p2m->domain) && iommu_snoop;
+- epte->suppress_ve = 1;
+
+ ept_p2m_type_to_flags(p2m, epte, epte->sa_p2mt, epte->access);
+
+@@ -306,8 +304,7 @@ static int ept_next_level(struct p2m_dom
+ ept_entry_t **table, unsigned long *gfn_remainder,
+ int next_level)
+ {
+- unsigned long mfn;
+- ept_entry_t *ept_entry, e;
++ ept_entry_t *ept_entry, *next = NULL, e;
+ u32 shift, index;
+
+ shift = next_level * EPT_TABLE_ORDER;
+@@ -332,19 +329,17 @@ static int ept_next_level(struct p2m_dom
+ if ( read_only )
+ return GUEST_TABLE_MAP_FAILED;
+
+- if ( !ept_set_middle_entry(p2m, ept_entry) )
++ next = ept_set_middle_entry(p2m, ept_entry);
++ if ( !next )
+ return GUEST_TABLE_MAP_FAILED;
+- else
+- e = atomic_read_ept_entry(ept_entry); /* Refresh */
++ /* e is now stale and hence may not be used anymore below. */
+ }
+-
+ /* The only time sp would be set here is if we had hit a superpage */
+- if ( is_epte_superpage(&e) )
++ else if ( is_epte_superpage(&e) )
+ return GUEST_TABLE_SUPER_PAGE;
+
+- mfn = e.mfn;
+ unmap_domain_page(*table);
+- *table = map_domain_page(_mfn(mfn));
++ *table = next ?: map_domain_page(_mfn(e.mfn));
+ *gfn_remainder &= (1UL << shift) - 1;
+ return GUEST_TABLE_NORMAL_PAGE;
+ }
diff --git a/system/xen/xsa/xsa328-4.13-2.patch b/system/xen/xsa/xsa328-4.13-2.patch
new file mode 100644
index 0000000000..c4f437f625
--- /dev/null
+++ b/system/xen/xsa/xsa328-4.13-2.patch
@@ -0,0 +1,48 @@
+From: <security@xenproject.org>
+Subject: x86/ept: atomically modify entries in ept_next_level
+
+ept_next_level was passing a live PTE pointer to ept_set_middle_entry,
+which was then modified without taking into account that the PTE could
+be part of a live EPT table. This wasn't a security issue because the
+pages returned by p2m_alloc_ptp are zeroed, so adding such an entry
+before actually initializing it didn't allow a guest to access
+physical memory addresses it wasn't supposed to access.
+
+This is part of XSA-328.
+
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/mm/p2m-ept.c
++++ b/xen/arch/x86/mm/p2m-ept.c
+@@ -307,6 +307,8 @@ static int ept_next_level(struct p2m_dom
+ ept_entry_t *ept_entry, *next = NULL, e;
+ u32 shift, index;
+
++ ASSERT(next_level);
++
+ shift = next_level * EPT_TABLE_ORDER;
+
+ index = *gfn_remainder >> shift;
+@@ -323,16 +325,20 @@ static int ept_next_level(struct p2m_dom
+
+ if ( !is_epte_present(&e) )
+ {
++ int rc;
++
+ if ( e.sa_p2mt == p2m_populate_on_demand )
+ return GUEST_TABLE_POD_PAGE;
+
+ if ( read_only )
+ return GUEST_TABLE_MAP_FAILED;
+
+- next = ept_set_middle_entry(p2m, ept_entry);
++ next = ept_set_middle_entry(p2m, &e);
+ if ( !next )
+ return GUEST_TABLE_MAP_FAILED;
+- /* e is now stale and hence may not be used anymore below. */
++
++ rc = atomic_write_ept_entry(p2m, ept_entry, e, next_level);
++ ASSERT(rc == 0);
+ }
+ /* The only time sp would be set here is if we had hit a superpage */
+ else if ( is_epte_superpage(&e) )
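
The pattern the patch switches to — fully build the entry in a local copy,
then install it with one atomic write so concurrent walkers (and, with shared
tables, the IOMMU) never observe a half-initialized PTE — looks like this in
isolation. A C11 sketch, not Xen's actual atomic_write_ept_entry():

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct { _Atomic uint64_t epte; } ept_entry_sketch;

    static void publish_entry(ept_entry_sketch *live, uint64_t new_epte)
    {
        /* A single atomic store publishes the fully initialized entry:
         * readers see either the old value or the new one, never a mix. */
        atomic_store_explicit(&live->epte, new_epte, memory_order_release);
    }
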
diff --git a/system/xen/xsa/xsa328-post-xsa321-4.13-1.patch b/system/xen/xsa/xsa328-post-xsa321-4.13-1.patch
new file mode 100644
index 0000000000..9a08ab240e
--- /dev/null
+++ b/system/xen/xsa/xsa328-post-xsa321-4.13-1.patch
@@ -0,0 +1,31 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: vtd: improve IOMMU TLB flush
+
+Do not limit PSI flushes to order 0 pages, in order to avoid doing a
+full TLB flush if the passed-in page has an order greater than 0 and
+is aligned. This should increase the performance of IOMMU TLB flushes
+when dealing with page orders greater than 0.
+
+This is part of XSA-321.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -570,13 +570,14 @@ static int __must_check iommu_flush_iotl
+ if ( iommu_domid == -1 )
+ continue;
+
+- if ( page_count != 1 || dfn_eq(dfn, INVALID_DFN) )
++ if ( !page_count || (page_count & (page_count - 1)) ||
++ dfn_eq(dfn, INVALID_DFN) || !IS_ALIGNED(dfn_x(dfn), page_count) )
+ rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
+ 0, flush_dev_iotlb);
+ else
+ rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
+ dfn_to_daddr(dfn),
+- PAGE_ORDER_4K,
++ get_order_from_pages(page_count),
+ !dma_old_pte_present,
+ flush_dev_iotlb);
+
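
The new condition is written in inverted form (when to fall back to a
domain-selective flush). Stated positively, a page-selective flush is usable
when page_count is a nonzero power of two and the dfn is aligned to it; a
small standalone sketch of that predicate:

    #include <stdbool.h>
    #include <stdio.h>

    static bool can_use_psi(unsigned long dfn, unsigned long page_count)
    {
        return page_count &&
               !(page_count & (page_count - 1)) &&  /* power of two */
               !(dfn & (page_count - 1));           /* dfn aligned to count */
    }

    int main(void)
    {
        printf("%d\n", can_use_psi(0x200, 0x200));  /* 1: aligned, order 9 */
        printf("%d\n", can_use_psi(0x201, 2));      /* 0: misaligned dfn */
        return 0;
    }
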
diff --git a/system/xen/xsa/xsa328-post-xsa321-4.13-2.patch b/system/xen/xsa/xsa328-post-xsa321-4.13-2.patch
new file mode 100644
index 0000000000..1e48615f2b
--- /dev/null
+++ b/system/xen/xsa/xsa328-post-xsa321-4.13-2.patch
@@ -0,0 +1,175 @@
+From: <security@xenproject.org>
+Subject: vtd: prune (and rename) cache flush functions
+
+Rename __iommu_flush_cache to iommu_sync_cache and remove
+iommu_flush_cache_page. Also remove the iommu_flush_cache_entry
+wrapper and just use iommu_sync_cache instead. Note the _entry suffix
+was meaningless as the wrapper was already taking a size parameter in
+bytes. While there, also constify the addr parameter.
+
+No functional change intended.
+
+This is part of XSA-321.
+
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/drivers/passthrough/vtd/extern.h
++++ b/xen/drivers/passthrough/vtd/extern.h
+@@ -43,8 +43,7 @@ void disable_qinval(struct vtd_iommu *io
+ int enable_intremap(struct vtd_iommu *iommu, int eim);
+ void disable_intremap(struct vtd_iommu *iommu);
+
+-void iommu_flush_cache_entry(void *addr, unsigned int size);
+-void iommu_flush_cache_page(void *addr, unsigned long npages);
++void iommu_sync_cache(const void *addr, unsigned int size);
+ int iommu_alloc(struct acpi_drhd_unit *drhd);
+ void iommu_free(struct acpi_drhd_unit *drhd);
+
+--- a/xen/drivers/passthrough/vtd/intremap.c
++++ b/xen/drivers/passthrough/vtd/intremap.c
+@@ -230,7 +230,7 @@ static void free_remap_entry(struct vtd_
+ iremap_entries, iremap_entry);
+
+ update_irte(iommu, iremap_entry, &new_ire, false);
+- iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
++ iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
+ iommu_flush_iec_index(iommu, 0, index);
+
+ unmap_vtd_domain_page(iremap_entries);
+@@ -406,7 +406,7 @@ static int ioapic_rte_to_remap_entry(str
+ }
+
+ update_irte(iommu, iremap_entry, &new_ire, !init);
+- iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
++ iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
+ iommu_flush_iec_index(iommu, 0, index);
+
+ unmap_vtd_domain_page(iremap_entries);
+@@ -695,7 +695,7 @@ static int msi_msg_to_remap_entry(
+ update_irte(iommu, iremap_entry, &new_ire, msi_desc->irte_initialized);
+ msi_desc->irte_initialized = true;
+
+- iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
++ iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
+ iommu_flush_iec_index(iommu, 0, index);
+
+ unmap_vtd_domain_page(iremap_entries);
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -140,7 +140,8 @@ static int context_get_domain_id(struct
+ }
+
+ static int iommus_incoherent;
+-static void __iommu_flush_cache(void *addr, unsigned int size)
++
++void iommu_sync_cache(const void *addr, unsigned int size)
+ {
+ int i;
+ static unsigned int clflush_size = 0;
+@@ -155,16 +156,6 @@ static void __iommu_flush_cache(void *ad
+ cacheline_flush((char *)addr + i);
+ }
+
+-void iommu_flush_cache_entry(void *addr, unsigned int size)
+-{
+- __iommu_flush_cache(addr, size);
+-}
+-
+-void iommu_flush_cache_page(void *addr, unsigned long npages)
+-{
+- __iommu_flush_cache(addr, PAGE_SIZE * npages);
+-}
+-
+ /* Allocate page table, return its machine address */
+ uint64_t alloc_pgtable_maddr(unsigned long npages, nodeid_t node)
+ {
+@@ -183,7 +174,7 @@ uint64_t alloc_pgtable_maddr(unsigned lo
+ vaddr = __map_domain_page(cur_pg);
+ memset(vaddr, 0, PAGE_SIZE);
+
+- iommu_flush_cache_page(vaddr, 1);
++ iommu_sync_cache(vaddr, PAGE_SIZE);
+ unmap_domain_page(vaddr);
+ cur_pg++;
+ }
+@@ -216,7 +207,7 @@ static u64 bus_to_context_maddr(struct v
+ }
+ set_root_value(*root, maddr);
+ set_root_present(*root);
+- iommu_flush_cache_entry(root, sizeof(struct root_entry));
++ iommu_sync_cache(root, sizeof(struct root_entry));
+ }
+ maddr = (u64) get_context_addr(*root);
+ unmap_vtd_domain_page(root_entries);
+@@ -263,7 +254,7 @@ static u64 addr_to_dma_page_maddr(struct
+ */
+ dma_set_pte_readable(*pte);
+ dma_set_pte_writable(*pte);
+- iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++ iommu_sync_cache(pte, sizeof(struct dma_pte));
+ }
+
+ if ( level == 2 )
+@@ -640,7 +631,7 @@ static int __must_check dma_pte_clear_on
+ *flush_flags |= IOMMU_FLUSHF_modified;
+
+ spin_unlock(&hd->arch.mapping_lock);
+- iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++ iommu_sync_cache(pte, sizeof(struct dma_pte));
+
+ unmap_vtd_domain_page(page);
+
+@@ -679,7 +670,7 @@ static void iommu_free_page_table(struct
+ iommu_free_pagetable(dma_pte_addr(*pte), next_level);
+
+ dma_clear_pte(*pte);
+- iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++ iommu_sync_cache(pte, sizeof(struct dma_pte));
+ }
+
+ unmap_vtd_domain_page(pt_vaddr);
+@@ -1400,7 +1391,7 @@ int domain_context_mapping_one(
+ context_set_address_width(*context, agaw);
+ context_set_fault_enable(*context);
+ context_set_present(*context);
+- iommu_flush_cache_entry(context, sizeof(struct context_entry));
++ iommu_sync_cache(context, sizeof(struct context_entry));
+ spin_unlock(&iommu->lock);
+
+ /* Context entry was previously non-present (with domid 0). */
+@@ -1564,7 +1555,7 @@ int domain_context_unmap_one(
+
+ context_clear_present(*context);
+ context_clear_entry(*context);
+- iommu_flush_cache_entry(context, sizeof(struct context_entry));
++ iommu_sync_cache(context, sizeof(struct context_entry));
+
+ iommu_domid= domain_iommu_domid(domain, iommu);
+ if ( iommu_domid == -1 )
+@@ -1791,7 +1782,7 @@ static int __must_check intel_iommu_map_
+
+ *pte = new;
+
+- iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++ iommu_sync_cache(pte, sizeof(struct dma_pte));
+ spin_unlock(&hd->arch.mapping_lock);
+ unmap_vtd_domain_page(page);
+
+@@ -1866,7 +1857,7 @@ int iommu_pte_flush(struct domain *d, ui
+ int iommu_domid;
+ int rc = 0;
+
+- iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++ iommu_sync_cache(pte, sizeof(struct dma_pte));
+
+ for_each_drhd_unit ( drhd )
+ {
+@@ -2724,7 +2715,7 @@ static int __init intel_iommu_quarantine
+ dma_set_pte_addr(*pte, maddr);
+ dma_set_pte_readable(*pte);
+ }
+- iommu_flush_cache_page(parent, 1);
++ iommu_sync_cache(parent, PAGE_SIZE);
+
+ unmap_vtd_domain_page(parent);
+ parent = map_vtd_domain_page(maddr);
diff --git a/system/xen/xsa/xsa328-post-xsa321-4.13-3.patch b/system/xen/xsa/xsa328-post-xsa321-4.13-3.patch
new file mode 100644
index 0000000000..c141c4b785
--- /dev/null
+++ b/system/xen/xsa/xsa328-post-xsa321-4.13-3.patch
@@ -0,0 +1,82 @@
+From: <security@xenproject.org>
+Subject: x86/iommu: introduce a cache sync hook
+
+The hook is only implemented for VT-d and it uses the already existing
+iommu_sync_cache function present in VT-d code. The new hook is
+added so that the cache can be flushed by code outside of VT-d when
+using shared page tables.
+
+Note that alloc_pgtable_maddr must use the now locally defined
+sync_cache function, because IOMMU ops are not yet set up the first
+time the function gets called during IOMMU initialization.
+
+No functional change intended.
+
+This is part of XSA-321.
+
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/drivers/passthrough/vtd/extern.h
++++ b/xen/drivers/passthrough/vtd/extern.h
+@@ -43,7 +43,6 @@ void disable_qinval(struct vtd_iommu *io
+ int enable_intremap(struct vtd_iommu *iommu, int eim);
+ void disable_intremap(struct vtd_iommu *iommu);
+
+-void iommu_sync_cache(const void *addr, unsigned int size);
+ int iommu_alloc(struct acpi_drhd_unit *drhd);
+ void iommu_free(struct acpi_drhd_unit *drhd);
+
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -141,7 +141,7 @@ static int context_get_domain_id(struct
+
+ static int iommus_incoherent;
+
+-void iommu_sync_cache(const void *addr, unsigned int size)
++static void sync_cache(const void *addr, unsigned int size)
+ {
+ int i;
+ static unsigned int clflush_size = 0;
+@@ -174,7 +174,7 @@ uint64_t alloc_pgtable_maddr(unsigned lo
+ vaddr = __map_domain_page(cur_pg);
+ memset(vaddr, 0, PAGE_SIZE);
+
+- iommu_sync_cache(vaddr, PAGE_SIZE);
++ sync_cache(vaddr, PAGE_SIZE);
+ unmap_domain_page(vaddr);
+ cur_pg++;
+ }
+@@ -2763,6 +2763,7 @@ const struct iommu_ops __initconstrel in
+ .iotlb_flush_all = iommu_flush_iotlb_all,
+ .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
+ .dump_p2m_table = vtd_dump_p2m_table,
++ .sync_cache = sync_cache,
+ };
+
+ const struct iommu_init_ops __initconstrel intel_iommu_init_ops = {
+--- a/xen/include/asm-x86/iommu.h
++++ b/xen/include/asm-x86/iommu.h
+@@ -121,6 +121,13 @@ extern bool untrusted_msi;
+ int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq,
+ const uint8_t gvec);
+
++#define iommu_sync_cache(addr, size) ({ \
++ const struct iommu_ops *ops = iommu_get_ops(); \
++ \
++ if ( ops->sync_cache ) \
++ iommu_vcall(ops, sync_cache, addr, size); \
++})
++
+ #endif /* !__ARCH_X86_IOMMU_H__ */
+ /*
+ * Local variables:
+--- a/xen/include/xen/iommu.h
++++ b/xen/include/xen/iommu.h
+@@ -250,6 +250,7 @@ struct iommu_ops {
+ int (*setup_hpet_msi)(struct msi_desc *);
+
+ int (*adjust_irq_affinities)(void);
++ void (*sync_cache)(const void *addr, unsigned int size);
+ #endif /* CONFIG_X86 */
+
+ int __must_check (*suspend)(void);
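
The shape of the hook — an optional function pointer that only non-coherent
implementations fill in, with the wrapper degrading to a no-op otherwise —
can be sketched on its own as follows (simplified: the real wrapper above
dispatches through iommu_vcall()):

    struct iommu_ops_sketch {
        /* NULL on coherent IOMMUs; VT-d points this at its sync_cache(). */
        void (*sync_cache)(const void *addr, unsigned int size);
    };

    static void iommu_sync_cache_sketch(const struct iommu_ops_sketch *ops,
                                        const void *addr, unsigned int size)
    {
        if ( ops->sync_cache )
            ops->sync_cache(addr, size);
    }
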
diff --git a/system/xen/xsa/xsa328-post-xsa321-4.13-4.patch b/system/xen/xsa/xsa328-post-xsa321-4.13-4.patch
new file mode 100644
index 0000000000..62bbcc7271
--- /dev/null
+++ b/system/xen/xsa/xsa328-post-xsa321-4.13-4.patch
@@ -0,0 +1,36 @@
+From: <security@xenproject.org>
+Subject: vtd: don't assume addresses are aligned in sync_cache
+
+Current code in sync_cache assumes that the address passed in is
+aligned to a cache line size. Fix the code to support passing in
+arbitrary addresses not necessarily aligned to a cache line size.
+
+This is part of XSA-321.
+
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -143,8 +143,8 @@ static int iommus_incoherent;
+
+ static void sync_cache(const void *addr, unsigned int size)
+ {
+- int i;
+- static unsigned int clflush_size = 0;
++ static unsigned long clflush_size = 0;
++ const void *end = addr + size;
+
+ if ( !iommus_incoherent )
+ return;
+@@ -152,8 +152,9 @@ static void sync_cache(const void *addr,
+ if ( clflush_size == 0 )
+ clflush_size = get_cache_line_size();
+
+- for ( i = 0; i < size; i += clflush_size )
+- cacheline_flush((char *)addr + i);
++ addr -= (unsigned long)addr & (clflush_size - 1);
++ for ( ; addr < end; addr += clflush_size )
++ cacheline_flush((char *)addr);
+ }
+
+ /* Allocate page table, return its machine address */
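
Read on its own, the fixed loop rounds the start address down to a cache-line
boundary and then steps one line at a time until past the end of the range.
A standalone sketch, with printf standing in for the real cacheline_flush():

    #include <stdint.h>
    #include <stdio.h>

    static void sync_cache_sketch(const void *addr, unsigned int size,
                                  unsigned long clflush_size)
    {
        const char *p = addr;
        const char *end = p + size;

        /* Round down so a partial first line is still covered. */
        p = (const char *)((uintptr_t)p & ~((uintptr_t)clflush_size - 1));

        for ( ; p < end; p += clflush_size )
            printf("flush line at %p\n", (const void *)p);
    }
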
diff --git a/system/xen/xsa/xsa328-post-xsa321-4.13-5.patch b/system/xen/xsa/xsa328-post-xsa321-4.13-5.patch
new file mode 100644
index 0000000000..60cfe6ccdf
--- /dev/null
+++ b/system/xen/xsa/xsa328-post-xsa321-4.13-5.patch
@@ -0,0 +1,24 @@
+From: <security@xenproject.org>
+Subject: x86/alternative: introduce alternative_2
+
+It's based on alternative_io_2 without inputs or outputs but with an
+added memory clobber.
+
+This is part of XSA-321.
+
+Acked-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/include/asm-x86/alternative.h
++++ b/xen/include/asm-x86/alternative.h
+@@ -114,6 +114,11 @@ extern void alternative_branches(void);
+ #define alternative(oldinstr, newinstr, feature) \
+ asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
+
++#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
++ asm volatile (ALTERNATIVE_2(oldinstr, newinstr1, feature1, \
++ newinstr2, feature2) \
++ : : : "memory")
++
+ /*
+ * Alternative inline assembly with input.
+ *
diff --git a/system/xen/xsa/xsa328-post-xsa321-4.13-6.patch b/system/xen/xsa/xsa328-post-xsa321-4.13-6.patch
new file mode 100644
index 0000000000..4c5c5ab0ba
--- /dev/null
+++ b/system/xen/xsa/xsa328-post-xsa321-4.13-6.patch
@@ -0,0 +1,91 @@
+From: <security@xenproject.org>
+Subject: vtd: optimize CPU cache sync
+
+Some VT-d IOMMUs are non-coherent, which requires a cache write back
+in order for the changes made by the CPU to be visible to the IOMMU.
+This cache write back was done unconditionally using clflush, but there
+are other, more efficient instructions to do so; hence, implement support
+for them using the alternative framework.
+
+This is part of XSA-321.
+
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/drivers/passthrough/vtd/extern.h
++++ b/xen/drivers/passthrough/vtd/extern.h
+@@ -68,7 +68,6 @@ int __must_check qinval_device_iotlb_syn
+ u16 did, u16 size, u64 addr);
+
+ unsigned int get_cache_line_size(void);
+-void cacheline_flush(char *);
+ void flush_all_cache(void);
+
+ uint64_t alloc_pgtable_maddr(unsigned long npages, nodeid_t node);
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -31,6 +31,7 @@
+ #include <xen/pci_regs.h>
+ #include <xen/keyhandler.h>
+ #include <asm/msi.h>
++#include <asm/nops.h>
+ #include <asm/irq.h>
+ #include <asm/hvm/vmx/vmx.h>
+ #include <asm/p2m.h>
+@@ -154,7 +155,42 @@ static void sync_cache(const void *addr,
+
+ addr -= (unsigned long)addr & (clflush_size - 1);
+ for ( ; addr < end; addr += clflush_size )
+- cacheline_flush((char *)addr);
++/*
++ * The arguments to a macro must not include preprocessor directives. Doing so
++ * results in undefined behavior, so we have to create some defines here in
++ * order to avoid it.
++ */
++#if defined(HAVE_AS_CLWB)
++# define CLWB_ENCODING "clwb %[p]"
++#elif defined(HAVE_AS_XSAVEOPT)
++# define CLWB_ENCODING "data16 xsaveopt %[p]" /* clwb */
++#else
++# define CLWB_ENCODING ".byte 0x66, 0x0f, 0xae, 0x30" /* clwb (%%rax) */
++#endif
++
++#define BASE_INPUT(addr) [p] "m" (*(const char *)(addr))
++#if defined(HAVE_AS_CLWB) || defined(HAVE_AS_XSAVEOPT)
++# define INPUT BASE_INPUT
++#else
++# define INPUT(addr) "a" (addr), BASE_INPUT(addr)
++#endif
++ /*
++ * Note regarding the use of NOP_DS_PREFIX: it's faster to do a clflush
++ * + prefix than a clflush + nop, and hence the prefix is added instead
++ * of letting the alternative framework fill the gap by appending nops.
++ */
++ alternative_io_2(".byte " __stringify(NOP_DS_PREFIX) "; clflush %[p]",
++ "data16 clflush %[p]", /* clflushopt */
++ X86_FEATURE_CLFLUSHOPT,
++ CLWB_ENCODING,
++ X86_FEATURE_CLWB, /* no outputs */,
++ INPUT(addr));
++#undef INPUT
++#undef BASE_INPUT
++#undef CLWB_ENCODING
++
++ alternative_2("", "sfence", X86_FEATURE_CLFLUSHOPT,
++ "sfence", X86_FEATURE_CLWB);
+ }
+
+ /* Allocate page table, return its machine address */
+--- a/xen/drivers/passthrough/vtd/x86/vtd.c
++++ b/xen/drivers/passthrough/vtd/x86/vtd.c
+@@ -51,11 +51,6 @@ unsigned int get_cache_line_size(void)
+ return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
+ }
+
+-void cacheline_flush(char * addr)
+-{
+- clflush(addr);
+-}
+-
+ void flush_all_cache()
+ {
+ wbinvd();
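
A userspace analogue of what the alternatives achieve (a sketch assuming
64-byte cache lines and a CPU with clwb; compile with -mclwb — Xen itself
selects between clflush, clflushopt and clwb at patch time rather than at
compile time):

    #include <immintrin.h>

    /* Write the range back with clwb, then order it with sfence, since
     * clwb (like clflushopt) is only weakly ordered. */
    static void writeback_range(const void *addr, unsigned int size)
    {
        const char *p = addr;
        const char *end = p + size;

        for ( ; p < end; p += 64 )
            _mm_clwb(p);

        _mm_sfence();
    }
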
diff --git a/system/xen/xsa/xsa328-post-xsa321-4.13-7.patch b/system/xen/xsa/xsa328-post-xsa321-4.13-7.patch
new file mode 100644
index 0000000000..0bd018f972
--- /dev/null
+++ b/system/xen/xsa/xsa328-post-xsa321-4.13-7.patch
@@ -0,0 +1,153 @@
+From: <security@xenproject.org>
+Subject: x86/ept: flush cache when modifying PTEs and sharing page tables
+
+Modifications made to the page tables by EPT code need to be written
+to memory when the page tables are shared with the IOMMU, as Intel
+IOMMUs can be non-coherent and thus require changes to be written to
+memory in order to be visible to the IOMMU.
+
+In order to achieve this make sure data is written back to memory
+after writing an EPT entry when the recalc bit is not set in
+atomic_write_ept_entry. If such bit is set, the entry will be
+adjusted and atomic_write_ept_entry will be called a second time
+without the recalc bit set. Note that when splitting a super page the
+new tables resulting of the split should also be written back.
+
+Failure to do so can allow devices behind the IOMMU access to the
+stale super page, or cause coherency issues as changes made by the
+processor to the page tables are not visible to the IOMMU.
+
+This allows to remove the VT-d specific iommu_pte_flush helper, since
+the cache write back is now performed by atomic_write_ept_entry, and
+hence iommu_iotlb_flush can be used to flush the IOMMU TLB. The newly
+used method (iommu_iotlb_flush) can result in fewer flushes, since it
+might sometimes legitimately be called with 0 flags, in which case it
+becomes a no-op.
+
+This is part of XSA-321.
+
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/mm/p2m-ept.c
++++ b/xen/arch/x86/mm/p2m-ept.c
+@@ -58,6 +58,19 @@ static int atomic_write_ept_entry(struct
+
+ write_atomic(&entryptr->epte, new.epte);
+
++ /*
++ * The recalc field on the EPT is used to signal either that a
++ * recalculation of the EMT field is required (which doesn't affect the
++ * IOMMU), or a type change. Type changes can only be between ram_rw,
++ * logdirty and ioreq_server: changes to/from logdirty won't work well with
++ * an IOMMU anyway, as IOMMU #PFs are not synchronous and will lead to
++ * aborts, and changes to/from ioreq_server are already fully flushed
++ * before returning to guest context (see
++ * XEN_DMOP_map_mem_type_to_ioreq_server).
++ */
++ if ( !new.recalc && iommu_use_hap_pt(p2m->domain) )
++ iommu_sync_cache(entryptr, sizeof(*entryptr));
++
+ return 0;
+ }
+
+@@ -278,6 +291,9 @@ static bool_t ept_split_super_page(struc
+ break;
+ }
+
++ if ( iommu_use_hap_pt(p2m->domain) )
++ iommu_sync_cache(table, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
++
+ unmap_domain_page(table);
+
+ /* Even failed we should install the newly allocated ept page. */
+@@ -337,6 +353,9 @@ static int ept_next_level(struct p2m_dom
+ if ( !next )
+ return GUEST_TABLE_MAP_FAILED;
+
++ if ( iommu_use_hap_pt(p2m->domain) )
++ iommu_sync_cache(next, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
++
+ rc = atomic_write_ept_entry(p2m, ept_entry, e, next_level);
+ ASSERT(rc == 0);
+ }
+@@ -821,7 +840,10 @@ out:
+ need_modify_vtd_table )
+ {
+ if ( iommu_use_hap_pt(d) )
+- rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
++ rc = iommu_iotlb_flush(d, _dfn(gfn), (1u << order),
++ (iommu_flags ? IOMMU_FLUSHF_added : 0) |
++ (vtd_pte_present ? IOMMU_FLUSHF_modified
++ : 0));
+ else if ( need_iommu_pt_sync(d) )
+ rc = iommu_flags ?
+ iommu_legacy_map(d, _dfn(gfn), mfn, order, iommu_flags) :
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -1884,53 +1884,6 @@ static int intel_iommu_lookup_page(struc
+ return 0;
+ }
+
+-int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
+- int order, int present)
+-{
+- struct acpi_drhd_unit *drhd;
+- struct vtd_iommu *iommu = NULL;
+- struct domain_iommu *hd = dom_iommu(d);
+- bool_t flush_dev_iotlb;
+- int iommu_domid;
+- int rc = 0;
+-
+- iommu_sync_cache(pte, sizeof(struct dma_pte));
+-
+- for_each_drhd_unit ( drhd )
+- {
+- iommu = drhd->iommu;
+- if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
+- continue;
+-
+- flush_dev_iotlb = !!find_ats_dev_drhd(iommu);
+- iommu_domid= domain_iommu_domid(d, iommu);
+- if ( iommu_domid == -1 )
+- continue;
+-
+- rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
+- __dfn_to_daddr(dfn),
+- order, !present, flush_dev_iotlb);
+- if ( rc > 0 )
+- {
+- iommu_flush_write_buffer(iommu);
+- rc = 0;
+- }
+- }
+-
+- if ( unlikely(rc) )
+- {
+- if ( !d->is_shutting_down && printk_ratelimit() )
+- printk(XENLOG_ERR VTDPREFIX
+- " d%d: IOMMU pages flush failed: %d\n",
+- d->domain_id, rc);
+-
+- if ( !is_hardware_domain(d) )
+- domain_crash(d);
+- }
+-
+- return rc;
+-}
+-
+ static int __init vtd_ept_page_compatible(struct vtd_iommu *iommu)
+ {
+ u64 ept_cap, vtd_cap = iommu->cap;
+--- a/xen/include/asm-x86/iommu.h
++++ b/xen/include/asm-x86/iommu.h
+@@ -97,10 +97,6 @@ static inline int iommu_adjust_irq_affin
+ : 0;
+ }
+
+-/* While VT-d specific, this must get declared in a generic header. */
+-int __must_check iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
+- int order, int present);
+-
+ static inline bool iommu_supports_x2apic(void)
+ {
+ return iommu_init_ops && iommu_init_ops->supports_x2apic