s390/boot: Swap vmalloc and Lowcore/Real Memory Copy areas
This is a preparatory rework to allow uncoupling the virtual
and physical address spaces.

Currently the order of virtual memory areas is as follows (the
lowcore and .amode31 section are skipped, as they are irrelevant
here):

	identity mapping (the kernel is contained within)
	vmemmap
	vmalloc
	modules
	Absolute Lowcore
	Real Memory Copy

In the future the kernel will be mapped separately and placed at
the end of the virtual address space, so the layout would turn
into this:

	identity mapping
	vmemmap
	vmalloc
	modules
	Absolute Lowcore
	Real Memory Copy
	kernel

However, the distance between the kernel and modules needs to be
as small as possible, ideally none. Thus, the Absolute Lowcore
and Real Memory Copy areas would stand in the way and need to be
moved as well:

	identity mapping
	vmemmap
	Absolute Lowcore
	Real Memory Copy
	vmalloc
	modules
	kernel

To facilitate such a layout, swap the vmalloc area with the
Absolute Lowcore and Real Memory Copy areas. As a result, the
current layout turns into:

	identity mapping (the kernel is contained within)
	vmemmap
	Absolute Lowcore
	Real Memory Copy
	vmalloc
	modules

This will allow placing the kernel directly next to the modules
once it gets mapped separately.

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
commit c8aef260c8
parent ecf74da64d
arch/s390/boot/startup.c
@@ -297,28 +297,30 @@ static unsigned long setup_kernel_memory_layout(void)
 	/* force vmalloc and modules below kasan shadow */
 	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
-	__memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
-	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
-				   sizeof(struct lowcore));
-	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
+	MODULES_END = round_down(vmax, _SEGMENT_SIZE);
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
 	VMALLOC_END = MODULES_VADDR;
 
 	/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
-	vsize = round_down(VMALLOC_END / 2, _SEGMENT_SIZE);
+	vsize = (VMALLOC_END - (MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE)) / 2;
+	vsize = round_down(vsize, _SEGMENT_SIZE);
 	vmalloc_size = min(vmalloc_size, vsize);
 	VMALLOC_START = VMALLOC_END - vmalloc_size;
 
+	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
+	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
+				   sizeof(struct lowcore));
+
 	/* split remaining virtual space between 1:1 mapping & vmemmap array */
-	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
 	pages = SECTION_ALIGN_UP(pages);
 	/* keep vmemmap_start aligned to a top level region table entry */
-	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
+	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
 	/* make sure identity map doesn't overlay with vmemmap */
 	ident_map_size = min(ident_map_size, vmemmap_start);
 	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
-	/* make sure vmemmap doesn't overlay with vmalloc area */
-	if (vmemmap_start + vmemmap_size > VMALLOC_START) {
+	/* make sure vmemmap doesn't overlay with absolute lowcore area */
+	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
 		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
 		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
 	}
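[Editor's note] To make the new carving order easier to follow, here is a minimal
standalone sketch of the computation after this patch. It compiles on its own;
the constants, the round_down_to() helper, and all starting values are
simplified stand-ins chosen for illustration, not the kernel's real definitions,
and the sizeof(struct lowcore) alignment of __abs_lowcore is omitted:

#include <stdio.h>

/* Simplified stand-ins for the kernel constants; real values differ */
#define PAGE_SIZE		0x1000UL
#define _SEGMENT_SIZE		0x100000UL
#define MEMCPY_REAL_SIZE	PAGE_SIZE
#define ABS_LOWCORE_MAP_SIZE	(2 * PAGE_SIZE)
#define STRUCT_PAGE_SIZE	64UL	/* assumed sizeof(struct page) */

static unsigned long round_down_to(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);	/* align must be a power of two */
}

int main(void)
{
	unsigned long vmax = 0x20000000000UL;		/* assumed top of virtual space */
	unsigned long modules_len = 0x80000000UL;	/* assumed MODULES_LEN */
	unsigned long vmalloc_size = 0x8000000000UL;	/* assumed vmalloc size */
	unsigned long modules_end, modules_vaddr, vmalloc_end, vmalloc_start;
	unsigned long memcpy_real_area, abs_lowcore, vsize, pages;

	/* New order: modules and vmalloc are carved from the top first ... */
	modules_end = round_down_to(vmax, _SEGMENT_SIZE);
	modules_vaddr = modules_end - modules_len;
	vmalloc_end = modules_vaddr;

	/* ... with room for the two moved areas subtracted up front */
	vsize = (vmalloc_end - (MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE)) / 2;
	vsize = round_down_to(vsize, _SEGMENT_SIZE);
	if (vmalloc_size > vsize)
		vmalloc_size = vsize;
	vmalloc_start = vmalloc_end - vmalloc_size;

	/* Real Memory Copy and Absolute Lowcore now land below vmalloc */
	memcpy_real_area = round_down_to(vmalloc_start - MEMCPY_REAL_SIZE, PAGE_SIZE);
	abs_lowcore = memcpy_real_area - ABS_LOWCORE_MAP_SIZE;

	/*
	 * What remains below Absolute Lowcore is split between the 1:1
	 * mapping and the vmemmap array: every mapped page costs PAGE_SIZE
	 * plus one struct page entry, hence the changed divisor above.
	 */
	pages = abs_lowcore / (PAGE_SIZE + STRUCT_PAGE_SIZE);

	printf("1:1 map / vmemmap  below 0x%lx (%lu pages)\n", abs_lowcore, pages);
	printf("Absolute Lowcore   at    0x%lx\n", abs_lowcore);
	printf("Real Memory Copy   at    0x%lx\n", memcpy_real_area);
	printf("vmalloc            0x%lx - 0x%lx\n", vmalloc_start, vmalloc_end);
	printf("modules            0x%lx - 0x%lx\n", modules_vaddr, modules_end);
	return 0;
}

Printing the areas from lowest to highest shows the intended result: Absolute
Lowcore and Real Memory Copy now sit directly below vmalloc, leaving modules
at the very top, where the kernel is to be placed later.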
arch/s390/mm/vmem.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/sort.h>
 #include <asm/page-states.h>
+#include <asm/abs_lowcore.h>
 #include <asm/cacheflush.h>
 #include <asm/nospec-branch.h>
 #include <asm/ctlreg.h>
@@ -436,7 +437,7 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
 		return -EINVAL;
 	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
-	if (WARN_ON_ONCE(end > VMALLOC_START))
+	if (WARN_ON_ONCE(end > __abs_lowcore))
 		return -EINVAL;
 	for (addr = start; addr < end; addr = next) {
 		next = pgd_addr_end(addr, end);
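[Editor's note] The vmem.c hunks follow directly from the swap: modify_pagetable()
may only touch page tables lying fully within the 1:1 mapping and the vmemmap
array, and the first area above them is now the Absolute Lowcore mapping rather
than vmalloc, hence the new bound (and the added abs_lowcore.h include). A small
standalone sketch of that guard; the helper name and values are hypothetical,
only the bound mirrors the patch:

#include <assert.h>
#include <stdbool.h>

/*
 * Hypothetical predicate mirroring the updated guard in modify_pagetable():
 * the 1:1 mapping and the vmemmap array now end where the Absolute Lowcore
 * mapping begins, so the upper bound moves from VMALLOC_START to __abs_lowcore.
 */
static bool range_in_ident_or_vmemmap(unsigned long start, unsigned long end,
				      unsigned long abs_lowcore)
{
	return start < end && end <= abs_lowcore;
}

int main(void)
{
	unsigned long abs_lowcore = 0x17f7fffd000UL;	/* illustrative value */

	/* a range ending below the Absolute Lowcore area is still fair game ... */
	assert(range_in_ident_or_vmemmap(0x0UL, 0x10000000UL, abs_lowcore));
	/* ... while one reaching into it must be rejected with -EINVAL */
	assert(!range_in_ident_or_vmemmap(0x0UL, abs_lowcore + 0x1000UL, abs_lowcore));
	return 0;
}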