powerpc/book3s64/hash: Align start/end address correctly with bolted mapping
This ensures we don't do a partial mapping of memory. With nvdimm, when creating namespaces with a size not aligned to 16MB, the kernel ends up partially mapping the pages. This can result in the kernel adding multiple hash page table entries for the same range. A new namespace will result in create_section_mapping() with start and end overlapping an already existing bolted hash page table entry.

commit 6acd7d5ef264 ("libnvdimm/namespace: Enforce memremap_compat_align()") made sure that we always create namespaces aligned to 16MB. But we can do better by avoiding mapping pages that are not aligned. This helps to catch access to these partially mapped pages early.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200907072539.67310-1-aneesh.kumar@linux.ibm.com
parent bbc4f40b53
commit 79b123cdf9
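As a rough illustration of the alignment this patch applies, the standalone C sketch below mimics the effect of the kernel's ALIGN()/ALIGN_DOWN() macros on an unaligned range, assuming a 16MB bolted-mapping step; the addresses, the step value, and the simplified macro definitions are made up for the example and are not kernel code.

#include <stdio.h>

/* Simplified stand-ins for the kernel's ALIGN()/ALIGN_DOWN() macros
 * (valid for power-of-two alignments only). */
#define ALIGN_DOWN(x, a)  ((x) & ~((unsigned long)(a) - 1))
#define ALIGN(x, a)       ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
	unsigned long step  = 16UL << 20;           /* assumed 16MB bolted mapping step */
	unsigned long start = (256UL << 20) + 4096; /* hypothetical unaligned namespace start */
	unsigned long end   = (512UL << 20) + 8192; /* hypothetical unaligned namespace end */

	/* Shrink the range to whole steps, as the patched htab_bolt_mapping() does:
	 * round the start up and the end down, so no partial bolted entry is created. */
	unsigned long vstart = ALIGN(start, step);
	unsigned long vend   = ALIGN_DOWN(end, step);

	printf("mapped range: 0x%lx .. 0x%lx\n", vstart, vend);
	return 0;
}

Because the range is rounded inward rather than outward, the unaligned head and tail are simply left unmapped, so a stray access to those pages faults early instead of being silently covered by an overlapping bolted entry.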
@@ -260,8 +260,12 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
 	    vstart, vend, pstart, prot, psize, ssize);
 
-	for (vaddr = vstart, paddr = pstart; vaddr < vend;
-	     vaddr += step, paddr += step) {
+	/* Carefully map only the possible range */
+	vaddr = ALIGN(vstart, step);
+	paddr = ALIGN(pstart, step);
+	vend = ALIGN_DOWN(vend, step);
+
+	for (; vaddr < vend; vaddr += step, paddr += step) {
 		unsigned long hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
 		unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
@@ -343,7 +347,9 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
 	if (!mmu_hash_ops.hpte_removebolted)
 		return -ENODEV;
 
-	for (vaddr = vstart; vaddr < vend; vaddr += step) {
+	/* Unmap the full range specificied */
+	vaddr = ALIGN_DOWN(vstart, step);
+	for (;vaddr < vend; vaddr += step) {
 		rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
 		if (rc == -ENOENT) {
 			ret = -ENOENT;
@@ -276,6 +276,7 @@ static int __meminit create_physical_mapping(unsigned long start,
 	int psize;
 
 	start = ALIGN(start, PAGE_SIZE);
+	end = ALIGN_DOWN(end, PAGE_SIZE);
 	for (addr = start; addr < end; addr += mapping_size) {
 		unsigned long gap, previous_size;
 		int rc;