author     Mario Preksavec <mario@slackware.hr>              2017-12-14 02:37:37 +0100
committer  Willy Sudiarto Raharjo <willysr@slackbuilds.org>  2017-12-16 07:32:13 +0700
commit     83300a98e0acfbd286414f7765497ab5743cff01 (patch)
tree       8308a482925cdff150b296ec1e2ac7307072979b /system/xen/xsa/xsa246-4.9.patch
parent     2acfdbef50be3643842190f87b5be89181e27839 (diff)
system/xen: XSA 246-251 update.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen/xsa/xsa246-4.9.patch')
-rw-r--r--  system/xen/xsa/xsa246-4.9.patch  74
1 file changed, 74 insertions, 0 deletions
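
To make the fix easier to follow, here is a minimal, self-contained C sketch of the failure mode the patch below addresses. The helper names (set_entry, demand_populate_buggy, demand_populate_fixed, POD_RETRY) are illustrative stand-ins, not the real Xen p2m interfaces: the point is only that swallowing the error from the page-shattering call turns the caller's retry protocol into an infinite loop, while propagating it lets the caller bail out.

/*
 * Hypothetical, simplified sketch of the XSA-246 failure mode; the helper
 * names stand in for the real Xen p2m API and are not part of the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define POD_RETRY 1

/* Stand-in for p2m_set_entry(): fails when the new page table cannot
 * be allocated. */
static int set_entry(bool can_alloc)
{
    return can_alloc ? 0 : -1;
}

/* Pre-fix shape: the error is dropped and the caller is always told
 * to retry, so a persistently failing allocation spins forever. */
static int demand_populate_buggy(bool can_alloc)
{
    set_entry(can_alloc);       /* result ignored */
    return POD_RETRY;
}

/* Post-fix shape: the error is propagated, ending the retry loop. */
static int demand_populate_fixed(bool can_alloc)
{
    if ( set_entry(can_alloc) )
        return -1;
    return POD_RETRY;
}

int main(void)
{
    int tries = 0, rc;

    /* Caller-side retry loop. With demand_populate_buggy(false) this
     * would never terminate on its own. */
    do {
        rc = demand_populate_fixed(false /* allocation keeps failing */);
        tries++;
    } while ( rc == POD_RETRY && tries < 10 );

    printf("stopped after %d tries, rc=%d\n", tries, rc);
    return 0;
}

The actual patch applies the same idea at three places in p2m_pod_demand_populate(): the 1G-splitting path now returns p2m_set_entry()'s result, the main populate path undoes the cache allocation and fails on error, and the 2M remap path returns -1 instead of looping.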
diff --git a/system/xen/xsa/xsa246-4.9.patch b/system/xen/xsa/xsa246-4.9.patch
new file mode 100644
index 0000000000..6370a10625
--- /dev/null
+++ b/system/xen/xsa/xsa246-4.9.patch
@@ -0,0 +1,74 @@
+From: Julien Grall <julien.grall@linaro.org>
+Subject: x86/pod: prevent infinite loop when shattering large pages
+
+When populating pages, the PoD may need to split large ones using
+p2m_set_entry and request the caller to retry (see ept_get_entry for
+instance).
+
+p2m_set_entry may fail to shatter the page if it is not possible to
+allocate memory for the new page table. However, the error is not
+propagated, so the callers keep retrying the PoD operation forever.
+
+Prevent the infinite loop by propagating the failure when it is not
+possible to shatter the large mapping.
+
+This is XSA-246.
+
+Signed-off-by: Julien Grall <julien.grall@linaro.org>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
+ * NOTE: In a fine-grained p2m locking scenario this operation
+ * may need to promote its locking from gfn->1g superpage
+ */
+- p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+- p2m_populate_on_demand, p2m->default_access);
+- return 0;
++ return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
++ p2m_populate_on_demand, p2m->default_access);
+ }
+
+ /* Only reclaim if we're in actual need of more cache. */
+@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai
+
+ gfn_aligned = (gfn >> order) << order;
+
+- p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+- p2m->default_access);
++ if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
++ p2m->default_access) )
++ {
++ p2m_pod_cache_add(p2m, p, order);
++ goto out_fail;
++ }
+
+ for( i = 0; i < (1UL << order); i++ )
+ {
+@@ -1150,13 +1153,18 @@ remap_and_retry:
+ BUG_ON(order != PAGE_ORDER_2M);
+ pod_unlock(p2m);
+
+- /* Remap this 2-meg region in singleton chunks */
+- /* NOTE: In a p2m fine-grained lock scenario this might
+- * need promoting the gfn lock from gfn->2M superpage */
++ /*
++ * Remap this 2-meg region in singleton chunks. See the comment on the
++ * 1G page splitting path above for why a single call suffices.
++ *
++ * NOTE: In a p2m fine-grained lock scenario this might
++ * need promoting the gfn lock from gfn->2M superpage.
++ */
+ gfn_aligned = (gfn>>order)<<order;
+- for(i=0; i<(1<<order); i++)
+- p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
+- p2m_populate_on_demand, p2m->default_access);
++ if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
++ p2m_populate_on_demand, p2m->default_access) )
++ return -1;
++
+ if ( tb_init_done )
+ {
+ struct {