author     Mario Preksavec <mario@slackware.hr>              2016-02-21 01:53:52 +0100
committer  Willy Sudiarto Raharjo <willysr@slackbuilds.org>  2016-02-23 00:57:58 +0700
commit     b53777993b305448c657fa5dc1df089b107b3d3a (patch)
tree       7140eee72449eb56cb8159d3f3a5576a5c6e584a /system/xen/patches
parent     d8bf1d6168c313ead10877b4a5770e88c575657c (diff)
download   slackbuilds-b53777993b305448c657fa5dc1df089b107b3d3a.tar.gz
           slackbuilds-b53777993b305448c657fa5dc1df089b107b3d3a.tar.xz
system/xen: Updated for version 4.6.1.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen/patches')
-rw-r--r--  system/xen/patches/disable_git_checkout.diff                                 14
-rw-r--r--  system/xen/patches/libcacard_fix_parallel_compile.patch                      13
-rw-r--r--  system/xen/patches/local_attach_support_for_phy.patch                        58
-rw-r--r--  system/xen/patches/remove_malloc_tracing.diff                                62
-rw-r--r--  system/xen/patches/symlinks_instead_of_hardlinks.diff                        31
-rw-r--r--  system/xen/patches/xsa154-4.6.patch                                         359
-rw-r--r--  system/xen/patches/xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch           56
-rw-r--r--  system/xen/patches/xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch       75
-rw-r--r--  system/xen/patches/xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch   41
-rw-r--r--  system/xen/patches/xsa162-qemuu.patch                                        42
-rw-r--r--  system/xen/patches/xsa170.patch                                              79
11 files changed, 742 insertions, 88 deletions
diff --git a/system/xen/patches/disable_git_checkout.diff b/system/xen/patches/disable_git_checkout.diff
index 54738c348d..292a8ba569 100644
--- a/system/xen/patches/disable_git_checkout.diff
+++ b/system/xen/patches/disable_git_checkout.diff
@@ -1,6 +1,6 @@
---- xen-4.5.0/tools/Makefile.orig 2015-01-12 17:53:24.000000000 +0100
-+++ xen-4.5.0/tools/Makefile 2015-02-16 10:12:09.487338727 +0100
-@@ -134,17 +134,17 @@
+--- xen-4.6.1/tools/Makefile.orig 2016-02-09 15:44:19.000000000 +0100
++++ xen-4.6.1/tools/Makefile 2016-02-20 20:22:38.659839628 +0100
+@@ -168,9 +168,9 @@
set -ex; \
if test -d $(QEMU_TRADITIONAL_LOC); then \
mkdir -p qemu-xen-traditional-dir; \
@@ -12,6 +12,8 @@
+# $(XEN_ROOT)/scripts/git-checkout.sh $(QEMU_TRADITIONAL_LOC) $(QEMU_TRADITIONAL_REVISION) qemu-xen-traditional-dir; \
fi
+ .PHONY: qemu-xen-traditional-dir-force-update
+@@ -214,9 +214,9 @@
qemu-xen-dir-find:
if test -d $(QEMU_UPSTREAM_LOC) ; then \
mkdir -p qemu-xen-dir; \
@@ -23,9 +25,9 @@
+# $(XEN_ROOT)/scripts/git-checkout.sh $(QEMU_UPSTREAM_LOC) $(QEMU_UPSTREAM_REVISION) qemu-xen-dir ; \
fi
- .PHONY: qemu-xen-traditional-dir-force-update
---- xen-4.5.0/tools/firmware/Makefile.orig 2015-01-12 17:53:24.000000000 +0100
-+++ xen-4.5.0/tools/firmware/Makefile 2015-02-16 10:12:27.276869964 +0100
+ .PHONY: qemu-xen-dir-force-update
+--- xen-4.6.1/tools/firmware/Makefile.orig 2016-02-09 15:44:19.000000000 +0100
++++ xen-4.6.1/tools/firmware/Makefile 2016-02-20 20:23:33.994923068 +0100
@@ -20,11 +20,11 @@
LD32BIT-$(CONFIG_FreeBSD) := LD32BIT_FLAG=-melf_i386_fbsd
diff --git a/system/xen/patches/libcacard_fix_parallel_compile.patch b/system/xen/patches/libcacard_fix_parallel_compile.patch
deleted file mode 100644
index 1dfd044518..0000000000
--- a/system/xen/patches/libcacard_fix_parallel_compile.patch
+++ /dev/null
@@ -1,13 +0,0 @@
---- xen-4.5.0/tools/qemu-xen/libcacard/Makefile.orig 2014-12-02 11:41:02.000000000 +0100
-+++ xen-4.5.0/tools/qemu-xen/libcacard/Makefile 2015-03-14 21:05:54.361325689 +0100
-@@ -26,8 +26,8 @@
- libcacard.la: LDFLAGS += -rpath $(libdir) -no-undefined \
- -export-syms $(SRC_PATH)/libcacard/libcacard.syms
- libcacard.la: LIBS = $(libcacard_libs)
--libcacard.la: $(libcacard-lobj-y)
-- $(call LINK,$^)
-+libcacard.la: $(libcacard-obj-y)
-+ $(call LINK,$(libcacard-lobj-y))
-
- libcacard.pc: $(SRC_PATH)/libcacard/libcacard.pc.in
- $(call quiet-command,sed -e 's|@LIBDIR@|$(libdir)|' \
diff --git a/system/xen/patches/local_attach_support_for_phy.patch b/system/xen/patches/local_attach_support_for_phy.patch
deleted file mode 100644
index 5dd402d364..0000000000
--- a/system/xen/patches/local_attach_support_for_phy.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-commit 3bcf91cbbd9a18db9ae7d594ffde7979774ed512
-Author: Roger Pau Monne <roger.pau@xxxxxxxxxx>
-Date: Wed Feb 12 11:15:17 2014 +0100
-
- libxl: local attach support for PHY backends using scripts
-
- Allow disks using the PHY backend to locally attach if using a script.
-
- Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
- Suggested-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
-
-
-Index: xen-4.5.0-testing/tools/libxl/libxl.c
-===================================================================
---- xen-4.5.0-testing.orig/tools/libxl/libxl.c
-+++ xen-4.5.0-testing/tools/libxl/libxl.c
-@@ -3006,6 +3006,16 @@ void libxl__device_disk_local_initiate_a
-
- switch (disk->backend) {
- case LIBXL_DISK_BACKEND_PHY:
-+ if (disk->script != NULL) {
-+ LOG(DEBUG, "trying to locally attach PHY device %s with script %s",
-+ disk->pdev_path, disk->script);
-+ libxl__prepare_ao_device(ao, &dls->aodev);
-+ dls->aodev.callback = local_device_attach_cb;
-+ device_disk_add(egc, LIBXL_TOOLSTACK_DOMID, disk,
-+ &dls->aodev, libxl__alloc_vdev,
-+ (void *) blkdev_start);
-+ return;
-+ }
- LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, "locally attaching PHY disk %s",
- disk->pdev_path);
- dev = disk->pdev_path;
-@@ -3085,7 +3095,7 @@ static void local_device_attach_cb(libxl
- }
-
- dev = GCSPRINTF("/dev/%s", disk->vdev);
-- LOG(DEBUG, "locally attaching qdisk %s", dev);
-+ LOG(DEBUG, "locally attached disk %s", dev);
-
- rc = libxl__device_from_disk(gc, LIBXL_TOOLSTACK_DOMID, disk, &device);
- if (rc < 0)
-@@ -3125,6 +3135,7 @@ void libxl__device_disk_local_initiate_d
- if (!dls->diskpath) goto out;
-
- switch (disk->backend) {
-+ case LIBXL_DISK_BACKEND_PHY:
- case LIBXL_DISK_BACKEND_QDISK:
- if (disk->vdev != NULL) {
- GCNEW(device);
-@@ -3142,7 +3153,6 @@ void libxl__device_disk_local_initiate_d
- /* disk->vdev == NULL; fall through */
- default:
- /*
-- * Nothing to do for PHYSTYPE_PHY.
- * For other device types assume that the blktap2 process is
- * needed by the soon to be started domain and do nothing.
- */
diff --git a/system/xen/patches/remove_malloc_tracing.diff b/system/xen/patches/remove_malloc_tracing.diff
new file mode 100644
index 0000000000..293000fdc6
--- /dev/null
+++ b/system/xen/patches/remove_malloc_tracing.diff
@@ -0,0 +1,62 @@
+--- xen-4.6.1/tools/qemu-xen/trace-events.orig 2016-01-06 17:42:43.000000000 +0100
++++ xen-4.6.1/tools/qemu-xen/trace-events 2016-02-20 20:36:48.996704075 +0100
+@@ -571,9 +571,6 @@
+ vm_state_notify(int running, int reason) "running %d reason %d"
+ load_file(const char *name, const char *path) "name %s location %s"
+ runstate_set(int new_state) "new state %d"
+-g_malloc(size_t size, void *ptr) "size %zu ptr %p"
+-g_realloc(void *ptr, size_t size, void *newptr) "ptr %p size %zu newptr %p"
+-g_free(void *ptr) "ptr %p"
+ system_wakeup_request(int reason) "reason=%d"
+ qemu_system_shutdown_request(void) ""
+ qemu_system_powerdown_request(void) ""
+--- xen-4.6.1/tools/qemu-xen/vl.c.orig 2016-01-06 17:42:43.000000000 +0100
++++ xen-4.6.1/tools/qemu-xen/vl.c 2016-02-20 20:38:17.715227938 +0100
+@@ -2628,26 +2628,6 @@
+ return popt;
+ }
+
+-static gpointer malloc_and_trace(gsize n_bytes)
+-{
+- void *ptr = malloc(n_bytes);
+- trace_g_malloc(n_bytes, ptr);
+- return ptr;
+-}
+-
+-static gpointer realloc_and_trace(gpointer mem, gsize n_bytes)
+-{
+- void *ptr = realloc(mem, n_bytes);
+- trace_g_realloc(mem, n_bytes, ptr);
+- return ptr;
+-}
+-
+-static void free_and_trace(gpointer mem)
+-{
+- trace_g_free(mem);
+- free(mem);
+-}
+-
+ static int machine_set_property(const char *name, const char *value,
+ void *opaque)
+ {
+@@ -2763,11 +2743,6 @@
+ bool userconfig = true;
+ const char *log_mask = NULL;
+ const char *log_file = NULL;
+- GMemVTable mem_trace = {
+- .malloc = malloc_and_trace,
+- .realloc = realloc_and_trace,
+- .free = free_and_trace,
+- };
+ const char *trace_events = NULL;
+ const char *trace_file = NULL;
+ const ram_addr_t default_ram_size = (ram_addr_t)DEFAULT_RAM_SIZE *
+@@ -2781,8 +2756,6 @@
+ error_set_progname(argv[0]);
+ qemu_init_exec_dir(argv[0]);
+
+- g_mem_set_vtable(&mem_trace);
+-
+ module_call_init(MODULE_INIT_QOM);
+
+ qemu_add_opts(&qemu_drive_opts);
diff --git a/system/xen/patches/symlinks_instead_of_hardlinks.diff b/system/xen/patches/symlinks_instead_of_hardlinks.diff
index 50af6c3254..92c98bd980 100644
--- a/system/xen/patches/symlinks_instead_of_hardlinks.diff
+++ b/system/xen/patches/symlinks_instead_of_hardlinks.diff
@@ -1,6 +1,6 @@
---- xen-4.2.0/tools/xenstore/Makefile.ORIG 2012-09-17 12:21:19.000000000 +0200
-+++ xen-4.2.0/tools/xenstore/Makefile 2012-10-21 22:59:54.585759242 +0200
-@@ -64,7 +64,7 @@
+--- xen-4.6.1/tools/xenstore/Makefile.orig 2016-02-09 15:44:19.000000000 +0100
++++ xen-4.6.1/tools/xenstore/Makefile 2016-02-20 22:54:11.877906517 +0100
+@@ -84,7 +84,7 @@
$(AR) cr $@ $^
$(CLIENTS): xenstore
@@ -8,13 +8,22 @@
+ ln -sf xenstore $@
xenstore: xenstore_client.o $(LIBXENSTORE)
- $(CC) $(LDFLAGS) $< $(LDLIBS_libxenstore) $(SOCKET_LIBS) -o $@ $(APPEND_LDFLAGS)
-@@ -116,7 +116,7 @@
- $(INSTALL_PROG) xenstore-control $(DESTDIR)$(BINDIR)
- $(INSTALL_PROG) xenstore $(DESTDIR)$(BINDIR)
+ $(CC) $< $(LDFLAGS) $(LDLIBS_libxenstore) $(SOCKET_LIBS) -o $@ $(APPEND_LDFLAGS)
+@@ -140,7 +140,7 @@
+ $(INSTALL_PROG) xenstore-control $(DESTDIR)$(bindir)
+ $(INSTALL_PROG) xenstore $(DESTDIR)$(bindir)
set -e ; for c in $(CLIENTS) ; do \
-- ln -f $(DESTDIR)$(BINDIR)/xenstore $(DESTDIR)$(BINDIR)/$${c} ; \
-+ ln -sf xenstore $(DESTDIR)$(BINDIR)/$${c} ; \
+- ln -f $(DESTDIR)$(bindir)/xenstore $(DESTDIR)$(bindir)/$${c} ; \
++ ln -sf xenstore $(DESTDIR)$(bindir)/$${c} ; \
done
- $(INSTALL_DIR) $(DESTDIR)$(LIBDIR)
- $(INSTALL_PROG) libxenstore.so.$(MAJOR).$(MINOR) $(DESTDIR)$(LIBDIR)
+ $(INSTALL_DIR) $(DESTDIR)$(libdir)
+ $(INSTALL_SHLIB) libxenstore.so.$(MAJOR).$(MINOR) $(DESTDIR)$(libdir)
+@@ -159,7 +159,7 @@
+ $(INSTALL_DIR) $(DESTDIR)$(bindir)
+ $(INSTALL_PROG) xenstore $(DESTDIR)$(bindir)
+ set -e ; for c in $(CLIENTS) ; do \
+- ln -f $(DESTDIR)$(bindir)/xenstore $(DESTDIR)$(bindir)/$${c} ; \
++ ln -sf xenstore $(DESTDIR)$(bindir)/$${c} ; \
+ done
+
+ -include $(DEPS)
diff --git a/system/xen/patches/xsa154-4.6.patch b/system/xen/patches/xsa154-4.6.patch
new file mode 100644
index 0000000000..f1e598812b
--- /dev/null
+++ b/system/xen/patches/xsa154-4.6.patch
@@ -0,0 +1,359 @@
+x86: enforce consistent cachability of MMIO mappings
+
+We've been told by Intel that inconsistent cachability between
+multiple mappings of the same page can affect system stability only
+when the affected page is an MMIO one. Since the stale data issue is
+of no relevance to the hypervisor (since all guest memory accesses go
+through proper accessors and validation), handling of RAM pages
+remains unchanged here. Any MMIO mapped by domains however needs to be
+done consistently (all cachable mappings or all uncachable ones), in
+order to avoid Machine Check exceptions. Since converting existing
+cachable mappings to uncachable (at the time an uncachable mapping
+gets established) would in the PV case require tracking all mappings,
+allow MMIO to only get mapped uncachable (UC, UC-, or WC).
+
+This also implies that in the PV case we mustn't use the L1 PTE update
+fast path when cachability flags get altered.
+
+Since in the HVM case at least for now we want to continue honoring
+pinned cachability attributes for pages not mapped by the hypervisor,
+special case handling of r/o MMIO pages (forcing UC) gets added there.
+Arguably the counterpart change to p2m-pt.c may not be necessary, since
+UC- (which already gets enforced there) is probably strict enough.
+
+Note that the shadow code changes include fixing the write protection
+of r/o MMIO ranges: shadow_l1e_remove_flags() and its siblings, other
+than l1e_remove_flags() and alike, return the new PTE (and hence
+ignoring their return values makes them no-ops).
+
+This is CVE-2016-2270 / XSA-154.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/docs/misc/xen-command-line.markdown
++++ b/docs/misc/xen-command-line.markdown
+@@ -1080,6 +1080,15 @@ limit is ignored by Xen.
+
+ Specify if the MMConfig space should be enabled.
+
++### mmio-relax
++> `= <boolean> | all`
++
++> Default: `false`
++
++By default, domains may not create cached mappings to MMIO regions.
++This option relaxes the check for Domain 0 (or when using `all`, all PV
++domains), to permit the use of cacheable MMIO mappings.
++
+ ### msi
+ > `= <boolean>`
+
+--- a/xen/arch/x86/hvm/mtrr.c
++++ b/xen/arch/x86/hvm/mtrr.c
+@@ -807,8 +807,17 @@ int epte_get_entry_emt(struct domain *d,
+ if ( v->domain != d )
+ v = d->vcpu ? d->vcpu[0] : NULL;
+
+- if ( !mfn_valid(mfn_x(mfn)) )
++ if ( !mfn_valid(mfn_x(mfn)) ||
++ rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn),
++ mfn_x(mfn) + (1UL << order) - 1) )
++ {
++ *ipat = 1;
+ return MTRR_TYPE_UNCACHABLE;
++ }
++
++ if ( rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn),
++ mfn_x(mfn) + (1UL << order) - 1) )
++ return -1;
+
+ switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
+ {
+--- a/xen/arch/x86/mm/p2m-pt.c
++++ b/xen/arch/x86/mm/p2m-pt.c
+@@ -107,6 +107,8 @@ static unsigned long p2m_type_to_flags(p
+ case p2m_mmio_direct:
+ if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
+ flags |= _PAGE_RW;
++ else
++ flags |= _PAGE_PWT;
+ return flags | P2M_BASE_FLAGS | _PAGE_PCD;
+ }
+ }
+--- a/xen/arch/x86/mm/shadow/multi.c
++++ b/xen/arch/x86/mm/shadow/multi.c
+@@ -519,6 +519,7 @@ _sh_propagate(struct vcpu *v,
+ gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
+ u32 pass_thru_flags;
+ u32 gflags, sflags;
++ bool_t mmio_mfn;
+
+ /* We don't shadow PAE l3s */
+ ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
+@@ -559,7 +560,10 @@ _sh_propagate(struct vcpu *v,
+ // mfn means that we can not usefully shadow anything, and so we
+ // return early.
+ //
+- if ( !mfn_valid(target_mfn)
++ mmio_mfn = !mfn_valid(target_mfn)
++ || (level == 1
++ && page_get_owner(mfn_to_page(target_mfn)) == dom_io);
++ if ( mmio_mfn
+ && !(level == 1 && (!shadow_mode_refcounts(d)
+ || p2mt == p2m_mmio_direct)) )
+ {
+@@ -577,7 +581,7 @@ _sh_propagate(struct vcpu *v,
+ _PAGE_RW | _PAGE_PRESENT);
+ if ( guest_supports_nx(v) )
+ pass_thru_flags |= _PAGE_NX_BIT;
+- if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
++ if ( level == 1 && !shadow_mode_refcounts(d) && mmio_mfn )
+ pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
+ sflags = gflags & pass_thru_flags;
+
+@@ -676,10 +680,14 @@ _sh_propagate(struct vcpu *v,
+ }
+
+ /* Read-only memory */
+- if ( p2m_is_readonly(p2mt) ||
+- (p2mt == p2m_mmio_direct &&
+- rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn))) )
++ if ( p2m_is_readonly(p2mt) )
+ sflags &= ~_PAGE_RW;
++ else if ( p2mt == p2m_mmio_direct &&
++ rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn)) )
++ {
++ sflags &= ~(_PAGE_RW | _PAGE_PAT);
++ sflags |= _PAGE_PCD | _PAGE_PWT;
++ }
+
+ // protect guest page tables
+ //
+@@ -1185,22 +1193,28 @@ static int shadow_set_l1e(struct domain
+ && !sh_l1e_is_magic(new_sl1e) )
+ {
+ /* About to install a new reference */
+- if ( shadow_mode_refcounts(d) ) {
++ if ( shadow_mode_refcounts(d) )
++ {
++#define PAGE_FLIPPABLE (_PAGE_RW | _PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
++ int rc;
++
+ TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
+- switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
++ switch ( rc = shadow_get_page_from_l1e(new_sl1e, d, new_type) )
+ {
+ default:
+ /* Doesn't look like a pagetable. */
+ flags |= SHADOW_SET_ERROR;
+ new_sl1e = shadow_l1e_empty();
+ break;
+- case 1:
+- shadow_l1e_remove_flags(new_sl1e, _PAGE_RW);
++ case PAGE_FLIPPABLE & -PAGE_FLIPPABLE ... PAGE_FLIPPABLE:
++ ASSERT(!(rc & ~PAGE_FLIPPABLE));
++ new_sl1e = shadow_l1e_flip_flags(new_sl1e, rc);
+ /* fall through */
+ case 0:
+ shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
+ break;
+ }
++#undef PAGE_FLIPPABLE
+ }
+ }
+
+--- a/xen/arch/x86/mm/shadow/types.h
++++ b/xen/arch/x86/mm/shadow/types.h
+@@ -99,6 +99,9 @@ static inline u32 shadow_l4e_get_flags(s
+ static inline shadow_l1e_t
+ shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
+ { l1e_remove_flags(sl1e, flags); return sl1e; }
++static inline shadow_l1e_t
++shadow_l1e_flip_flags(shadow_l1e_t sl1e, u32 flags)
++{ l1e_flip_flags(sl1e, flags); return sl1e; }
+
+ static inline shadow_l1e_t shadow_l1e_empty(void)
+ { return l1e_empty(); }
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -178,6 +178,18 @@ static uint32_t base_disallow_mask;
+ is_pv_domain(d)) ? \
+ L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
+
++static s8 __read_mostly opt_mmio_relax;
++static void __init parse_mmio_relax(const char *s)
++{
++ if ( !*s )
++ opt_mmio_relax = 1;
++ else
++ opt_mmio_relax = parse_bool(s);
++ if ( opt_mmio_relax < 0 && strcmp(s, "all") )
++ opt_mmio_relax = 0;
++}
++custom_param("mmio-relax", parse_mmio_relax);
++
+ static void __init init_frametable_chunk(void *start, void *end)
+ {
+ unsigned long s = (unsigned long)start;
+@@ -799,10 +811,7 @@ get_page_from_l1e(
+ if ( !mfn_valid(mfn) ||
+ (real_pg_owner = page_get_owner_and_reference(page)) == dom_io )
+ {
+-#ifndef NDEBUG
+- const unsigned long *ro_map;
+- unsigned int seg, bdf;
+-#endif
++ int flip = 0;
+
+ /* Only needed the reference to confirm dom_io ownership. */
+ if ( mfn_valid(mfn) )
+@@ -836,24 +845,55 @@ get_page_from_l1e(
+ return -EINVAL;
+ }
+
+- if ( !(l1f & _PAGE_RW) ||
+- !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
+- return 0;
++ if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
++ {
++ /* MMIO pages must not be mapped cachable unless requested so. */
++ switch ( opt_mmio_relax )
++ {
++ case 0:
++ break;
++ case 1:
++ if ( is_hardware_domain(l1e_owner) )
++ case -1:
++ return 0;
++ default:
++ ASSERT_UNREACHABLE();
++ }
++ }
++ else if ( l1f & _PAGE_RW )
++ {
+ #ifndef NDEBUG
+- if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
+- ((ro_map = pci_get_ro_map(seg)) != NULL &&
+- test_bit(bdf, ro_map)) )
+- printk(XENLOG_G_WARNING
+- "d%d: Forcing read-only access to MFN %lx\n",
+- l1e_owner->domain_id, mfn);
+- else
+- rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
+- print_mmio_emul_range,
+- &(struct mmio_emul_range_ctxt){
+- .d = l1e_owner,
+- .mfn = mfn });
++ const unsigned long *ro_map;
++ unsigned int seg, bdf;
++
++ if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
++ ((ro_map = pci_get_ro_map(seg)) != NULL &&
++ test_bit(bdf, ro_map)) )
++ printk(XENLOG_G_WARNING
++ "d%d: Forcing read-only access to MFN %lx\n",
++ l1e_owner->domain_id, mfn);
++ else
++ rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
++ print_mmio_emul_range,
++ &(struct mmio_emul_range_ctxt){
++ .d = l1e_owner,
++ .mfn = mfn });
+ #endif
+- return 1;
++ flip = _PAGE_RW;
++ }
++
++ switch ( l1f & PAGE_CACHE_ATTRS )
++ {
++ case 0: /* WB */
++ flip |= _PAGE_PWT | _PAGE_PCD;
++ break;
++ case _PAGE_PWT: /* WT */
++ case _PAGE_PWT | _PAGE_PAT: /* WP */
++ flip |= _PAGE_PCD | (l1f & _PAGE_PAT);
++ break;
++ }
++
++ return flip;
+ }
+
+ if ( unlikely( (real_pg_owner != pg_owner) &&
+@@ -1243,8 +1283,9 @@ static int alloc_l1_table(struct page_in
+ goto fail;
+ case 0:
+ break;
+- case 1:
+- l1e_remove_flags(pl1e[i], _PAGE_RW);
++ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
++ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
++ l1e_flip_flags(pl1e[i], ret);
+ break;
+ }
+
+@@ -1759,8 +1800,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
+ return -EINVAL;
+ }
+
+- /* Fast path for identical mapping, r/w and presence. */
+- if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
++ /* Fast path for identical mapping, r/w, presence, and cachability. */
++ if ( !l1e_has_changed(ol1e, nl1e,
++ PAGE_CACHE_ATTRS | _PAGE_RW | _PAGE_PRESENT) )
+ {
+ adjust_guest_l1e(nl1e, pt_dom);
+ if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
+@@ -1783,8 +1825,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
+ return rc;
+ case 0:
+ break;
+- case 1:
+- l1e_remove_flags(nl1e, _PAGE_RW);
++ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
++ ASSERT(!(rc & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
++ l1e_flip_flags(nl1e, rc);
+ rc = 0;
+ break;
+ }
+@@ -5000,6 +5043,7 @@ static int ptwr_emulated_update(
+ l1_pgentry_t pte, ol1e, nl1e, *pl1e;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
++ int ret;
+
+ /* Only allow naturally-aligned stores within the original %cr2 page. */
+ if ( unlikely(((addr^ptwr_ctxt->cr2) & PAGE_MASK) || (addr & (bytes-1))) )
+@@ -5047,7 +5091,7 @@ static int ptwr_emulated_update(
+
+ /* Check the new PTE. */
+ nl1e = l1e_from_intpte(val);
+- switch ( get_page_from_l1e(nl1e, d, d) )
++ switch ( ret = get_page_from_l1e(nl1e, d, d) )
+ {
+ default:
+ if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
+@@ -5071,8 +5115,9 @@ static int ptwr_emulated_update(
+ break;
+ case 0:
+ break;
+- case 1:
+- l1e_remove_flags(nl1e, _PAGE_RW);
++ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
++ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
++ l1e_flip_flags(nl1e, ret);
+ break;
+ }
+
+--- a/xen/include/asm-x86/page.h
++++ b/xen/include/asm-x86/page.h
+@@ -157,6 +157,9 @@ static inline l4_pgentry_t l4e_from_padd
+ #define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
+ #define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))
+
++/* Flip flags in an existing L1 PTE. */
++#define l1e_flip_flags(x, flags) ((x).l1 ^= put_pte_flags(flags))
++
+ /* Check if a pte's page mapping or significant access flags have changed. */
+ #define l1e_has_changed(x,y,flags) \
+ ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
diff --git a/system/xen/patches/xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch b/system/xen/patches/xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch
new file mode 100644
index 0000000000..7935e58c40
--- /dev/null
+++ b/system/xen/patches/xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch
@@ -0,0 +1,56 @@
+From 12b11658a9d6a654a1e7acbf2f2d56ce9a396c86 Mon Sep 17 00:00:00 2001
+From: David Vrabel <david.vrabel@citrix.com>
+Date: Fri, 20 Nov 2015 11:59:05 -0500
+Subject: [PATCH 1/3] xen: Add RING_COPY_REQUEST()
+
+RING_GET_REQUEST() on a shared ring is easy to use incorrectly
+(i.e., by not considering that the other end may alter the data in the
+shared ring while it is being inspected). Safe usage of a request
+generally requires taking a local copy.
+
+Provide a RING_COPY_REQUEST() macro to use instead of
+RING_GET_REQUEST() and an open-coded memcpy(). This takes care of
+ensuring that the copy is done correctly regardless of any possible
+compiler optimizations.
+
+Use a volatile source to prevent the compiler from reordering or
+omitting the copy.
+
+This is part of XSA155.
+
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+---
+v2: Add comment about GCC bug.
+---
+ xen/include/public/io/ring.h | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/xen/include/public/io/ring.h b/xen/include/public/io/ring.h
+index ba9401b..801c0da 100644
+--- a/xen/include/public/io/ring.h
++++ b/xen/include/public/io/ring.h
+@@ -212,6 +212,20 @@ typedef struct __name##_back_ring __name##_back_ring_t
+ #define RING_GET_REQUEST(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+
++/*
++ * Get a local copy of a request.
++ *
++ * Use this in preference to RING_GET_REQUEST() so all processing is
++ * done on a local copy that cannot be modified by the other end.
++ *
++ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
++ * to be ineffective where _req is a struct which consists of only bitfields.
++ */
++#define RING_COPY_REQUEST(_r, _idx, _req) do { \
++ /* Use volatile to force the copy into _req. */ \
++ *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
++} while (0)
++
+ #define RING_GET_RESPONSE(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
+--
+2.1.0
+
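For a backend, adopting the new macro is a one-line change per fetch. A hedged sketch of a consumer loop follows; DEFINE_RING_TYPES(), RING_IDX, RING_GET_REQUEST() and RING_COPY_REQUEST() (the latter added by the patch above) are the real ring.h names, while the request/response types, handle() and the barrier fallback are assumptions for illustration:

/* Sketch of a request consumer built on RING_COPY_REQUEST(); assumes
 * the Xen public headers are installed and patched as above. */
#include <stdio.h>
#include <stdint.h>
#include <xen/io/ring.h>

#ifndef xen_rmb
#define xen_rmb() __sync_synchronize()  /* assumption: GCC builtin barrier */
#endif

struct my_request  { uint64_t id; };
struct my_response { uint64_t id; };
DEFINE_RING_TYPES(my, struct my_request, struct my_response);

static void handle(const struct my_request *req)
{
    printf("request %llu\n", (unsigned long long)req->id);
}

static void consume_requests(my_back_ring_t *ring)
{
    RING_IDX rc = ring->req_cons;
    RING_IDX rp = ring->sring->req_prod;

    xen_rmb();  /* read req_prod before touching any request payload */

    while (rc != rp) {
        struct my_request req;

        /* Unsafe alternative:
         *   struct my_request *r = RING_GET_REQUEST(ring, rc);
         * every r->field access re-fetches shared memory, so the
         * frontend can change a value between validation and use. */
        RING_COPY_REQUEST(ring, rc, &req);  /* one volatile copy */
        ring->req_cons = ++rc;

        handle(&req);  /* all checks now see a stable snapshot */
    }
}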
diff --git a/system/xen/patches/xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch b/system/xen/patches/xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
new file mode 100644
index 0000000000..2d80a7bd43
--- /dev/null
+++ b/system/xen/patches/xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
@@ -0,0 +1,75 @@
+From 851ffb4eea917e2708c912291dea4d133026c0ac Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 20 Nov 2015 12:16:02 -0500
+Subject: [PATCH 2/3] blktap2: Use RING_COPY_REQUEST
+
+Instead of RING_GET_REQUEST. Using a local copy of the
+ring (and also with proper memory barriers) will mean
+we do not have to worry about the compiler optimizing
+the code and doing a double-fetch in the shared memory space.
+
+This is part of XSA155.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+---
+v2: Fix compile issues with tapdisk-vbd
+---
+ tools/blktap2/drivers/block-log.c | 3 ++-
+ tools/blktap2/drivers/tapdisk-vbd.c | 8 ++++----
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/tools/blktap2/drivers/block-log.c b/tools/blktap2/drivers/block-log.c
+index 5330cdc..5f3bd35 100644
+--- a/tools/blktap2/drivers/block-log.c
++++ b/tools/blktap2/drivers/block-log.c
+@@ -494,11 +494,12 @@ static int ctl_kick(struct tdlog_state* s, int fd)
+ reqstart = s->bring.req_cons;
+ reqend = s->sring->req_prod;
+
++ xen_mb();
+ BDPRINTF("ctl: ring kicked (start = %u, end = %u)", reqstart, reqend);
+
+ while (reqstart != reqend) {
+ /* XXX actually submit these! */
+- memcpy(&req, RING_GET_REQUEST(&s->bring, reqstart), sizeof(req));
++ RING_COPY_REQUEST(&s->bring, reqstart, &req);
+ BDPRINTF("ctl: read request %"PRIu64":%u", req.sector, req.count);
+ s->bring.req_cons = ++reqstart;
+
+diff --git a/tools/blktap2/drivers/tapdisk-vbd.c b/tools/blktap2/drivers/tapdisk-vbd.c
+index 6d1d94a..89ef9ed 100644
+--- a/tools/blktap2/drivers/tapdisk-vbd.c
++++ b/tools/blktap2/drivers/tapdisk-vbd.c
+@@ -1555,7 +1555,7 @@ tapdisk_vbd_pull_ring_requests(td_vbd_t *vbd)
+ int idx;
+ RING_IDX rp, rc;
+ td_ring_t *ring;
+- blkif_request_t *req;
++ blkif_request_t req;
+ td_vbd_request_t *vreq;
+
+ ring = &vbd->ring;
+@@ -1566,16 +1566,16 @@ tapdisk_vbd_pull_ring_requests(td_vbd_t *vbd)
+ xen_rmb();
+
+ for (rc = ring->fe_ring.req_cons; rc != rp; rc++) {
+- req = RING_GET_REQUEST(&ring->fe_ring, rc);
++ RING_COPY_REQUEST(&ring->fe_ring, rc, &req);
+ ++ring->fe_ring.req_cons;
+
+- idx = req->id;
++ idx = req.id;
+ vreq = &vbd->request_list[idx];
+
+ ASSERT(list_empty(&vreq->next));
+ ASSERT(vreq->secs_pending == 0);
+
+- memcpy(&vreq->req, req, sizeof(blkif_request_t));
++ memcpy(&vreq->req, &req, sizeof(blkif_request_t));
+ vbd->received++;
+ vreq->vbd = vbd;
+
+--
+2.1.4
+
diff --git a/system/xen/patches/xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch b/system/xen/patches/xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
new file mode 100644
index 0000000000..56a6e538f4
--- /dev/null
+++ b/system/xen/patches/xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
@@ -0,0 +1,41 @@
+From c1fce65e2b720684ea6ba76ae59921542bd154bb Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 20 Nov 2015 12:22:14 -0500
+Subject: [PATCH 3/3] libvchan: Read prod/cons only once.
+
+We must ensure that the prod/cons are only read once and that
+the compiler won't try to optimize the reads. That is, it must not
+split the read of these into multiple instructions that influence
+later branch code. As such, insert barriers when fetching the cons
+and prod index.
+
+This is part of XSA155.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+---
+ tools/libvchan/io.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tools/libvchan/io.c b/tools/libvchan/io.c
+index 8a9629b..381cc05 100644
+--- a/tools/libvchan/io.c
++++ b/tools/libvchan/io.c
+@@ -117,6 +117,7 @@ static inline int send_notify(struct libxenvchan *ctrl, uint8_t bit)
+ static inline int raw_get_data_ready(struct libxenvchan *ctrl)
+ {
+ uint32_t ready = rd_prod(ctrl) - rd_cons(ctrl);
++ xen_mb(); /* Ensure 'ready' is read only once. */
+ if (ready > rd_ring_size(ctrl))
+ /* We have no way to return errors. Locking up the ring is
+ * better than the alternatives. */
+@@ -158,6 +159,7 @@ int libxenvchan_data_ready(struct libxenvchan *ctrl)
+ static inline int raw_get_buffer_space(struct libxenvchan *ctrl)
+ {
+ uint32_t ready = wr_ring_size(ctrl) - (wr_prod(ctrl) - wr_cons(ctrl));
++ xen_mb(); /* Ensure 'ready' is read only once. */
+ if (ready > wr_ring_size(ctrl))
+ /* We have no way to return errors. Locking up the ring is
+ * better than the alternatives. */
+--
+2.1.0
+
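Reduced to its essentials, the pattern this patch enforces looks like the sketch below; the index struct and helper are placeholders, and the xen_mb() fallback is an assumption standing in for Xen's real barrier macro:

#include <stdint.h>

#ifndef xen_mb
#define xen_mb() __sync_synchronize()  /* assumption: full barrier via GCC builtin */
#endif

struct vchan_idx { uint32_t prod, cons; };  /* lives in shared memory */

static int ready_bytes(struct vchan_idx *shared, uint32_t ring_size)
{
    uint32_t ready = shared->prod - shared->cons;

    xen_mb();  /* force prod/cons to be fetched exactly once, here;
                * without it the compiler may repeat the loads after
                * the range check below, reintroducing the race */
    if (ready > ring_size)
        return -1;  /* corrupt indices: fail closed */
    return ready;
}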
diff --git a/system/xen/patches/xsa162-qemuu.patch b/system/xen/patches/xsa162-qemuu.patch
new file mode 100644
index 0000000000..2e3352d88b
--- /dev/null
+++ b/system/xen/patches/xsa162-qemuu.patch
@@ -0,0 +1,42 @@
+net: pcnet: add check to validate receive data size (CVE-2015-7504)
+
+In loopback mode, pcnet_receive routine appends CRC code to the
+receive buffer. If the data size given is the same as the buffer size,
+the appended CRC code overwrites 4 bytes after s->buffer. Added a
+check to avoid that.
+
+Reported-by: Qinghao Tang <luodalongde@gmail.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+---
+ hw/net/pcnet.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
+index 3437376..5f55591 100644
+--- a/hw/net/pcnet.c
++++ b/hw/net/pcnet.c
+@@ -1085,7 +1085,7 @@ ssize_t pcnet_receive(NetClientState *nc, const uint8_t *buf, size_t size_)
+ uint32_t fcs = ~0;
+ uint8_t *p = src;
+
+- while (p != &src[size-4])
++ while (p != &src[size])
+ CRC(fcs, *p++);
+ crc_err = (*(uint32_t *)p != htonl(fcs));
+ }
+@@ -1234,8 +1234,10 @@ static void pcnet_transmit(PCNetState *s)
+ bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
+
+ /* if multi-tmd packet outsizes s->buffer then skip it silently.
+- Note: this is not what real hw does */
+- if (s->xmit_pos + bcnt > sizeof(s->buffer)) {
++ * Note: this is not what real hw does.
++ * Last four bytes of s->buffer are used to store CRC FCS code.
++ */
++ if (s->xmit_pos + bcnt > sizeof(s->buffer) - 4) {
+ s->xmit_pos = -1;
+ goto txdone;
+ }
+--
+2.4.3
+
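The bound the fix establishes can be restated outside QEMU: if a 4-byte FCS is appended at buf[size], then size may be at most sizeof(buf) - 4. A sketch with illustrative names and sizes, not QEMU's actual pcnet state:

/* Sketch of the invariant enforced by the fix above. */
#include <string.h>
#include <stdint.h>

#define BUF_SIZE 4096
#define FCS_LEN  4        /* CRC appended in loopback mode */

static int rx_store(uint8_t buf[BUF_SIZE], const uint8_t *src,
                    size_t size, uint32_t fcs)
{
    if (size > BUF_SIZE - FCS_LEN)  /* was effectively: size > BUF_SIZE */
        return -1;                  /* drop rather than overflow */
    memcpy(buf, src, size);
    memcpy(buf + size, &fcs, FCS_LEN);  /* the append that overflowed */
    return 0;
}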
diff --git a/system/xen/patches/xsa170.patch b/system/xen/patches/xsa170.patch
new file mode 100644
index 0000000000..f71fa19130
--- /dev/null
+++ b/system/xen/patches/xsa170.patch
@@ -0,0 +1,79 @@
+x86/VMX: sanitize rIP before re-entering guest
+
+... to prevent guest user mode arranging for a guest crash (due to
+failed VM entry). (On the AMD system I checked, hardware is doing
+exactly the canonicalization being added here.)
+
+Note that fixing this in an architecturally correct way would be quite
+a bit more involved: Making the x86 instruction emulator check all
+branch targets for validity, plus dealing with invalid rIP resulting
+from update_guest_eip() or incoming directly during a VM exit. The only
+way to get the latter right would be by not having hardware do the
+injection.
+
+Note further that there are two early returns from
+vmx_vmexit_handler(): One (through vmx_failed_vmentry()) leads to
+domain_crash() anyway, and the other covers real mode only and can
+neither occur with a non-canonical rIP nor result in an altered rIP,
+so we don't need to force those paths through the checking logic.
+
+This is XSA-170.
+
+Reported-by: 刘令 <liuling-it@360.cn>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/hvm/vmx/vmx.c
++++ b/xen/arch/x86/hvm/vmx/vmx.c
+@@ -2968,7 +2968,7 @@ static int vmx_handle_apic_write(void)
+ void vmx_vmexit_handler(struct cpu_user_regs *regs)
+ {
+ unsigned long exit_qualification, exit_reason, idtv_info, intr_info = 0;
+- unsigned int vector = 0;
++ unsigned int vector = 0, mode;
+ struct vcpu *v = current;
+
+ __vmread(GUEST_RIP, &regs->rip);
+@@ -3566,6 +3566,41 @@ void vmx_vmexit_handler(struct cpu_user_
+ out:
+ if ( nestedhvm_vcpu_in_guestmode(v) )
+ nvmx_idtv_handling();
++
++ /*
++ * VM entry will fail (causing the guest to get crashed) if rIP (and
++ * rFLAGS, but we don't have an issue there) doesn't meet certain
++ * criteria. As we must not allow less than fully privileged mode to have
++ * such an effect on the domain, we correct rIP in that case (accepting
++ * this not being architecturally correct behavior, as the injected #GP
++ * fault will then not see the correct [invalid] return address).
++ * And since we know the guest will crash, we crash it right away if it
++ * already is in most privileged mode.
++ */
++ mode = vmx_guest_x86_mode(v);
++ if ( mode == 8 ? !is_canonical_address(regs->rip)
++ : regs->rip != regs->_eip )
++ {
++ struct segment_register ss;
++
++ gprintk(XENLOG_WARNING, "Bad rIP %lx for mode %u\n", regs->rip, mode);
++
++ vmx_get_segment_register(v, x86_seg_ss, &ss);
++ if ( ss.attr.fields.dpl )
++ {
++ __vmread(VM_ENTRY_INTR_INFO, &intr_info);
++ if ( !(intr_info & INTR_INFO_VALID_MASK) )
++ hvm_inject_hw_exception(TRAP_gp_fault, 0);
++ /* Need to fix rIP nevertheless. */
++ if ( mode == 8 )
++ regs->rip = (long)(regs->rip << (64 - VADDR_BITS)) >>
++ (64 - VADDR_BITS);
++ else
++ regs->rip = regs->_eip;
++ }
++ else
++ domain_crash(v->domain);
++ }
+ }
+
+ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
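
The rIP repair uses a classic sign-extension idiom: shifting the address left by (64 - VADDR_BITS) and arithmetic-shifting it back replicates bit 47 into bits 63:48, which is exactly the x86-64 canonical form. A standalone sketch, assuming VADDR_BITS is 48 as on current processors:

/* Standalone sketch of the canonicalization used above; VADDR_BITS=48
 * is an assumption matching current x86-64 implementations. */
#include <stdio.h>
#include <stdint.h>

#define VADDR_BITS 48

static uint64_t canonicalize(uint64_t rip)
{
    /* The left shift discards bits 63:48; the arithmetic right shift
     * (as on GCC/Clang, like the patch itself relies on) then copies
     * bit 47 into them. */
    return (uint64_t)((int64_t)(rip << (64 - VADDR_BITS)) >>
                      (64 - VADDR_BITS));
}

int main(void)
{
    uint64_t bad = 0x1234000000001000ULL;  /* non-canonical: junk in 63:48 */
    printf("%#llx -> %#llx\n", (unsigned long long)bad,
           (unsigned long long)canonicalize(bad));  /* -> 0x1000 */
    return 0;
}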