s390/mm: add missing arch_set_page_dat() call to gmap allocations

If the cmma no-dat feature is available all pages that are not used for
dynamic address translation are marked as "no-dat" with the ESSA
instruction. This information is visible to the hypervisor, so that the
hypervisor can optimize purging of guest TLB entries. This also means that
pages which are used for dynamic address translation must not be marked as
"no-dat", since the hypervisor may then incorrectly not purge guest TLB
entries.

Region, segment, and page tables allocated within the gmap code are
incorrectly marked as "no-dat", since an explicit call to
arch_set_page_dat() is missing, which would remove the "no-dat" mark.

In order to fix this add a new gmap_alloc_crst() function which should
be used to allocate region and segment tables, and which also calls
arch_set_page_dat().

Also add the arch_set_page_dat() call to page_table_alloc_pgste().

Cc: <stable@vger.kernel.org>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
Heiko Carstens 2023-10-20 17:26:50 +02:00 committed by Vasily Gorbik
parent 09cda0a400
commit 1954da4a2b
2 changed files with 19 additions and 6 deletions

View File

@@ -21,10 +21,22 @@
 #include <asm/pgalloc.h>
 #include <asm/gmap.h>
+#include <asm/page.h>
 #include <asm/tlb.h>
 
 #define GMAP_SHADOW_FAKE_TABLE	1ULL
/*
 * gmap_alloc_crst() - allocate a page to hold a region/segment (CRST) table.
 *
 * Allocates a CRST_ALLOC_ORDER page and calls arch_set_page_dat() on it so
 * the page is not left marked "no-dat" — the page will be used for dynamic
 * address translation, so the hypervisor must not skip purging guest TLB
 * entries for it (see the commit message above).
 *
 * Returns the allocated page, or NULL on allocation failure.
 */
static struct page *gmap_alloc_crst(void)
{
struct page *page;
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return NULL;
/* clear the "no-dat" mark set by the cmma no-dat feature */
arch_set_page_dat(page, CRST_ALLOC_ORDER);
return page;
}
 /**
  * gmap_alloc - allocate and initialize a guest address space
  * @limit: maximum address of the gmap address space
@@ -67,7 +79,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
 	spin_lock_init(&gmap->guest_table_lock);
 	spin_lock_init(&gmap->shadow_lock);
 	refcount_set(&gmap->ref_count, 1);
-	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+	page = gmap_alloc_crst();
 	if (!page)
 		goto out_free;
 	page->index = 0;
@@ -308,7 +320,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 	unsigned long *new;
 
 	/* since we dont free the gmap table until gmap_free we can unlock */
-	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+	page = gmap_alloc_crst();
 	if (!page)
 		return -ENOMEM;
 	new = page_to_virt(page);
@@ -1759,7 +1771,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
 	BUG_ON(!gmap_is_shadow(sg));
 	/* Allocate a shadow region second table */
-	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+	page = gmap_alloc_crst();
 	if (!page)
 		return -ENOMEM;
 	page->index = r2t & _REGION_ENTRY_ORIGIN;
@@ -1843,7 +1855,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 	BUG_ON(!gmap_is_shadow(sg));
 	/* Allocate a shadow region second table */
-	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+	page = gmap_alloc_crst();
 	if (!page)
 		return -ENOMEM;
 	page->index = r3t & _REGION_ENTRY_ORIGIN;
@@ -1927,7 +1939,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
 	/* Allocate a shadow segment table */
-	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+	page = gmap_alloc_crst();
 	if (!page)
 		return -ENOMEM;
 	page->index = sgt & _REGION_ENTRY_ORIGIN;
@@ -2855,7 +2867,7 @@ int s390_replace_asce(struct gmap *gmap)
 	if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
 		return -EINVAL;
 
-	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+	page = gmap_alloc_crst();
 	if (!page)
 		return -ENOMEM;
 	page->index = 0;

View File

@@ -146,6 +146,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
 	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
 	if (ptdesc) {
 		table = (u64 *)ptdesc_to_virt(ptdesc);
+		arch_set_page_dat(virt_to_page(table), 0);
 		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	}