powerpc/mm/hash: Reduce hash_mm_context size

Allocate the subpage-protection related variables only if we use the feature.
This helps in reducing the size of the hash-related mm context struct by around 4K.

Before the patch
sizeof(struct hash_mm_context)  = 8288

After the patch
sizeof(struct hash_mm_context) = 4160

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
Aneesh Kumar K.V 2019-04-17 18:33:51 +05:30 committed by Michael Ellerman
parent 701101865f
commit ef629cc5bf
5 changed files with 40 additions and 14 deletions

View File

@ -687,10 +687,8 @@ struct subpage_prot_table {
#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */
/*
@ -720,7 +718,7 @@ struct hash_mm_context {
#endif
#ifdef CONFIG_PPC_SUBPAGE_PROT
struct subpage_prot_table spt;
struct subpage_prot_table *spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
};

View File

@ -206,7 +206,7 @@ static inline struct slice_mask *mm_ctx_slice_mask_16g(mm_context_t *ctx)
#ifdef CONFIG_PPC_SUBPAGE_PROT
static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
{
return &ctx->hash_context->spt;
return ctx->hash_context->spt;
}
#endif

View File

@ -1150,6 +1150,9 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
u32 spp = 0;
u32 **sbpm, *sbpp;
if (!spt)
return 0;
if (ea >= spt->maxaddr)
return 0;
if (ea < 0x100000000UL) {

View File

@ -63,7 +63,8 @@ static int hash__init_new_context(struct mm_struct *mm)
if (index < 0)
return index;
mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), GFP_KERNEL);
mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
GFP_KERNEL);
if (!mm->context.hash_context) {
ida_free(&mmu_context_ida, index);
return -ENOMEM;
@ -89,11 +90,21 @@ static int hash__init_new_context(struct mm_struct *mm)
} else {
/* This is fork. Copy hash_context details from current->mm */
memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
/* inherit subpage prot details if we have one. */
if (current->mm->context.hash_context->spt) {
mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
GFP_KERNEL);
if (!mm->context.hash_context->spt) {
ida_free(&mmu_context_ida, index);
kfree(mm->context.hash_context);
return -ENOMEM;
}
}
#endif
}
subpage_prot_init_new_context(mm);
pkey_mm_init(mm);
return index;
}

View File

@ -29,6 +29,9 @@ void subpage_prot_free(struct mm_struct *mm)
unsigned long i, j, addr;
u32 **p;
if (!spt)
return;
for (i = 0; i < 4; ++i) {
if (spt->low_prot[i]) {
free_page((unsigned long)spt->low_prot[i]);
@ -48,13 +51,7 @@ void subpage_prot_free(struct mm_struct *mm)
free_page((unsigned long)p);
}
spt->maxaddr = 0;
}
void subpage_prot_init_new_context(struct mm_struct *mm)
{
struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
memset(spt, 0, sizeof(*spt));
kfree(spt);
}
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
@ -99,6 +96,9 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
size_t nw;
unsigned long next, limit;
if (!spt)
return ;
down_write(&mm->mmap_sem);
limit = addr + len;
if (limit > spt->maxaddr)
@ -218,6 +218,20 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
return -EFAULT;
down_write(&mm->mmap_sem);
if (!spt) {
/*
* Allocate subpage prot table if not already done.
* Do this with mmap_sem held
*/
spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
if (!spt) {
err = -ENOMEM;
goto out;
}
mm->context.hash_context->spt = spt;
}
subpage_mark_vma_nohuge(mm, addr, len);
for (limit = addr + len; addr < limit; addr = next) {
next = pmd_addr_end(addr, limit);