x86/ldt: Unmap PTEs for the slot before freeing LDT pages
commit a0e6e0831c upstream.

modify_ldt(2) leaves the old LDT mapped after switching over to the
new one. The old LDT gets freed and the pages can be re-used.

Leaving the mapping in place can have security implications. The
mapping is present in the userspace page tables and Meltdown-like
attacks can read these freed and possibly reused pages.

It's relatively simple to fix: unmap the old LDT and flush the TLB
before freeing the old LDT memory.

This further allows us to avoid flushing the TLB in map_ldt_struct(),
as the slot is either unmapped and flushed by unmap_ldt_struct() or
has never been mapped at all.

[ tglx: Massaged changelog and removed the needless line breaks ]

Fixes: f55f0501cb ("x86/pti: Put the LDT in its own PGD if PTI is on")
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@alien8.de
Cc: hpa@zytor.com
Cc: dave.hansen@linux.intel.com
Cc: luto@kernel.org
Cc: peterz@infradead.org
Cc: boris.ostrovsky@oracle.com
Cc: jgross@suse.com
Cc: bhe@redhat.com
Cc: willy@infradead.org
Cc: linux-mm@kvack.org
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20181026122856.66224-3-kirill.shutemov@linux.intel.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
committed by Greg Kroah-Hartman
parent d412ab7cfc
commit a17989cb9b
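The core of the fix is an ordering rule: clear every PTE of the old
slot, flush the TLB for that range, and only then free the memory.
A minimal sketch of that pattern, condensed from the hunks below into
one hypothetical helper (unmap_and_free_ldt() does not exist in the
patch, which keeps unmap_ldt_struct() and free_ldt_struct() separate;
the !ldt and PTI checks are omitted here):

	/* Hypothetical condensation of unmap_ldt_struct() + free_ldt_struct(). */
	static void unmap_and_free_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
	{
		unsigned long va, base = (unsigned long)ldt_slot_va(ldt->slot);
		int i, nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE,
					       PAGE_SIZE);

		for (i = 0; i < nr_pages; i++) {
			spinlock_t *ptl;
			pte_t *ptep;

			va = base + (i << PAGE_SHIFT);
			ptep = get_locked_pte(mm, va, &ptl);	/* walk + lock the PTE */
			pte_clear(mm, va, ptep);		/* unmap the page */
			pte_unmap_unlock(ptep, ptl);
		}

		/*
		 * The flush must precede the free: a stale TLB entry would
		 * otherwise still let userspace read the page once re-used.
		 */
		flush_tlb_mm_range(mm, base, base + nr_pages * PAGE_SIZE, 0);
		free_ldt_struct(ldt);
	}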
@@ -103,14 +103,6 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 /*
  * If PTI is enabled, this maps the LDT into the kernelmode and
  * usermode tables for the given mm.
- *
- * There is no corresponding unmap function. Even if the LDT is freed, we
- * leave the PTEs around until the slot is reused or the mm is destroyed.
- * This is harmless: the LDT is always in ordinary memory, and no one will
- * access the freed slot.
- *
- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
- * it useful, and the flush would slow down modify_ldt().
  */
 static int
 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
@@ -119,8 +111,8 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	bool is_vmalloc, had_top_level_entry;
 	unsigned long va;
 	spinlock_t *ptl;
+	int i, nr_pages;
 	pgd_t *pgd;
-	int i;
 
 	if (!static_cpu_has(X86_FEATURE_PTI))
 		return 0;
@@ -141,7 +133,9 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 
 	is_vmalloc = is_vmalloc_addr(ldt->entries);
 
-	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; i++) {
 		unsigned long offset = i << PAGE_SHIFT;
 		const void *src = (char *)ldt->entries + offset;
 		unsigned long pfn;
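A quick sanity check of the new page count, assuming the usual x86
values LDT_ENTRIES = 8192, LDT_ENTRY_SIZE = 8 and a 4 KiB PAGE_SIZE: a
maximal LDT occupies 8192 * 8 = 65536 bytes, so nr_pages =
DIV_ROUND_UP(65536, 4096) = 16, while a single-entry LDT still rounds
up to one page. The old "i * PAGE_SIZE < ldt->nr_entries *
LDT_ENTRY_SIZE" condition walked exactly the same pages; computing
nr_pages up front simply lets unmap_ldt_struct() below reuse the
same count.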
@@ -189,14 +183,42 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 		}
 	}
 
-	va = (unsigned long)ldt_slot_va(slot);
-	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
-
 	ldt->slot = slot;
 #endif
 	return 0;
 }
 
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	unsigned long va;
+	int i, nr_pages;
+
+	if (!ldt)
+		return;
+
+	/* LDT map/unmap is only required for PTI */
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; i++) {
+		unsigned long offset = i << PAGE_SHIFT;
+		spinlock_t *ptl;
+		pte_t *ptep;
+
+		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
+		ptep = get_locked_pte(mm, va, &ptl);
+		pte_clear(mm, va, ptep);
+		pte_unmap_unlock(ptep, ptl);
+	}
+
+	va = (unsigned long)ldt_slot_va(ldt->slot);
+	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0);
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+}
+
 static void free_ldt_pgtables(struct mm_struct *mm)
 {
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
@@ -433,6 +455,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	}
 
 	install_ldt(mm, new_ldt);
+	unmap_ldt_struct(mm, old_ldt);
 	free_ldt_struct(old_ldt);
 	error = 0;
 