Diffstat (limited to '0013-xen-arm-Construct-the-P2M-pages-pool-for-guests.patch')
-rw-r--r--  0013-xen-arm-Construct-the-P2M-pages-pool-for-guests.patch | 189
1 files changed, 189 insertions, 0 deletions
diff --git a/0013-xen-arm-Construct-the-P2M-pages-pool-for-guests.patch b/0013-xen-arm-Construct-the-P2M-pages-pool-for-guests.patch
new file mode 100644
index 0000000..dee9d9c
--- /dev/null
+++ b/0013-xen-arm-Construct-the-P2M-pages-pool-for-guests.patch
@@ -0,0 +1,189 @@
+From 914fc8e8b4cc003e90d51bee0aef54687358530a Mon Sep 17 00:00:00 2001
+From: Henry Wang <Henry.Wang@arm.com>
+Date: Tue, 11 Oct 2022 14:55:21 +0200
+Subject: [PATCH 13/26] xen/arm: Construct the P2M pages pool for guests
+
+This commit constructs the p2m pages pool for guests, introducing
+the necessary data structures and helpers.
+
+This is implemented by:
+
+- Adding a `struct paging_domain`, which contains a freelist, a
+counter and a spinlock, to `struct arch_domain`. The freelist holds
+the free p2m pages and the counter tracks the total number of pages
+in the p2m pages pool.
+
+- Adding a helper `p2m_get_allocation` to get the p2m pool size.
+
+- Adding a helper `p2m_set_allocation` to set the p2m pages pool
+size. This helper should be called before allocating memory for
+a guest.
+
+- Adding a helper `p2m_teardown_allocation` to free the p2m pages
+pool. This helper should be called during xl domain destroy, as
+shown in the sketch below.
+
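+For illustration only, a hedged sketch (not part of this patch; `d`
+is assumed to be a valid struct domain pointer, and the real callers
+are wired up by later patches in this series) of the expected call
+pattern:
+
+    unsigned long nr_pages = 1024; /* desired pool size, in pages */
+    bool preempted = false;
+    int rc;
+
+    /*
+     * Size the pool before populating guest memory; on preemption
+     * (-ERESTART with *preempted set) the caller would retry.
+     */
+    spin_lock(&d->arch.paging.lock);
+    rc = p2m_set_allocation(d, nr_pages, &preempted);
+    spin_unlock(&d->arch.paging.lock);
+
+    /*
+     * During domain destruction, drain the pool; in practice the
+     * -ERESTART becomes a hypercall continuation, not a bare loop.
+     */
+    do {
+        rc = p2m_teardown_allocation(d);
+    } while ( rc == -ERESTART );
+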
+This is part of CVE-2022-33747 / XSA-409.
+
+Signed-off-by: Henry Wang <Henry.Wang@arm.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+master commit: 55914f7fc91a468649b8a3ec3f53ae1c4aca6670
+master date: 2022-10-11 14:28:39 +0200
+---
+ xen/arch/arm/p2m.c | 88 ++++++++++++++++++++++++++++++++++++
+ xen/include/asm-arm/domain.h | 10 ++++
+ xen/include/asm-arm/p2m.h | 4 ++
+ 3 files changed, 102 insertions(+)
+
+diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
+index 27418ee5ee98..d8957dd8727c 100644
+--- a/xen/arch/arm/p2m.c
++++ b/xen/arch/arm/p2m.c
+@@ -50,6 +50,92 @@ static uint64_t generate_vttbr(uint16_t vmid, mfn_t root_mfn)
+ return (mfn_to_maddr(root_mfn) | ((uint64_t)vmid << 48));
+ }
+
++/* Return the size of the pool, rounded up to the nearest MB */
++unsigned int p2m_get_allocation(struct domain *d)
++{
++ unsigned long nr_pages = ACCESS_ONCE(d->arch.paging.p2m_total_pages);
++
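++    /* With 4KB pages, 1 << (20 - PAGE_SHIFT) == 256 pages per MB. */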
++ return ROUNDUP(nr_pages, 1 << (20 - PAGE_SHIFT)) >> (20 - PAGE_SHIFT);
++}
++
++/*
++ * Set the pool of pages to the required number of pages.
++ * Returns 0 for success, non-zero for failure.
++ * Call with d->arch.paging.lock held.
++ */
++int p2m_set_allocation(struct domain *d, unsigned long pages, bool *preempted)
++{
++ struct page_info *pg;
++
++ ASSERT(spin_is_locked(&d->arch.paging.lock));
++
++ for ( ; ; )
++ {
++ if ( d->arch.paging.p2m_total_pages < pages )
++ {
++ /* Need to allocate more memory from domheap */
++ pg = alloc_domheap_page(NULL, 0);
++ if ( pg == NULL )
++ {
++ printk(XENLOG_ERR "Failed to allocate P2M pages.\n");
++ return -ENOMEM;
++ }
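++            /*
++             * Writers hold the paging lock; ACCESS_ONCE() makes the
++             * update a single store, so p2m_get_allocation() can read
++             * the counter without taking the lock.
++             */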
++ ACCESS_ONCE(d->arch.paging.p2m_total_pages) =
++ d->arch.paging.p2m_total_pages + 1;
++ page_list_add_tail(pg, &d->arch.paging.p2m_freelist);
++ }
++ else if ( d->arch.paging.p2m_total_pages > pages )
++ {
++ /* Need to return memory to domheap */
++ pg = page_list_remove_head(&d->arch.paging.p2m_freelist);
++            if ( pg )
++ {
++ ACCESS_ONCE(d->arch.paging.p2m_total_pages) =
++ d->arch.paging.p2m_total_pages - 1;
++ free_domheap_page(pg);
++ }
++ else
++ {
++ printk(XENLOG_ERR
++ "Failed to free P2M pages, P2M freelist is empty.\n");
++ return -ENOMEM;
++ }
++ }
++ else
++ break;
++
++ /* Check to see if we need to yield and try again */
++ if ( preempted && general_preempt_check() )
++ {
++ *preempted = true;
++ return -ERESTART;
++ }
++ }
++
++ return 0;
++}
++
++int p2m_teardown_allocation(struct domain *d)
++{
++ int ret = 0;
++ bool preempted = false;
++
++ spin_lock(&d->arch.paging.lock);
++ if ( d->arch.paging.p2m_total_pages != 0 )
++ {
++ ret = p2m_set_allocation(d, 0, &preempted);
++ if ( preempted )
++ {
++ spin_unlock(&d->arch.paging.lock);
++ return -ERESTART;
++ }
++ ASSERT(d->arch.paging.p2m_total_pages == 0);
++ }
++ spin_unlock(&d->arch.paging.lock);
++
++ return ret;
++}
++
+ /* Unlock the flush and do a P2M TLB flush if necessary */
+ void p2m_write_unlock(struct p2m_domain *p2m)
+ {
+@@ -1599,7 +1685,9 @@ int p2m_init(struct domain *d)
+ unsigned int cpu;
+
+ rwlock_init(&p2m->lock);
++ spin_lock_init(&d->arch.paging.lock);
+ INIT_PAGE_LIST_HEAD(&p2m->pages);
++ INIT_PAGE_LIST_HEAD(&d->arch.paging.p2m_freelist);
+
+ p2m->vmid = INVALID_VMID;
+
+diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
+index 7f8ddd3f5c3b..2f31795ab96d 100644
+--- a/xen/include/asm-arm/domain.h
++++ b/xen/include/asm-arm/domain.h
+@@ -40,6 +40,14 @@ struct vtimer {
+ uint64_t cval;
+ };
+
++struct paging_domain {
++ spinlock_t lock;
++ /* Free P2M pages from the pre-allocated P2M pool */
++ struct page_list_head p2m_freelist;
++ /* Number of pages from the pre-allocated P2M pool */
++ unsigned long p2m_total_pages;
++};
++
+ struct arch_domain
+ {
+ #ifdef CONFIG_ARM_64
+@@ -51,6 +59,8 @@ struct arch_domain
+
+ struct hvm_domain hvm;
+
++ struct paging_domain paging;
++
+ struct vmmio vmmio;
+
+ /* Continuable domain_relinquish_resources(). */
+diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
+index b3ba83283e11..c9598740bd02 100644
+--- a/xen/include/asm-arm/p2m.h
++++ b/xen/include/asm-arm/p2m.h
+@@ -218,6 +218,10 @@ void p2m_restore_state(struct vcpu *n);
+ /* Print debugging/statistical info about a domain's p2m */
+ void p2m_dump_info(struct domain *d);
+
++unsigned int p2m_get_allocation(struct domain *d);
++int p2m_set_allocation(struct domain *d, unsigned long pages, bool *preempted);
++int p2m_teardown_allocation(struct domain *d);
++
+ static inline void p2m_write_lock(struct p2m_domain *p2m)
+ {
+ write_lock(&p2m->lock);
+--
+2.37.3
+