[PATCH] ppc64: Abolish ioremap_mm

Currently ppc64 has two mm_structs for the kernel: init_mm and also ioremap_mm. The latter really isn't necessary: this patch abolishes it, instead restricting vmallocs to the lower 1TB of the init_mm's range and placing io mappings in the upper 1TB. This simplifies the code in a number of places and eliminates an unnecessary set of pagetables. It also tweaks the unmap/free path a little, allowing us to remove the unmap_im_area() set of page table walkers, replacing them with unmap_vm_area().

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 20cee16ced
parent 6879dc137e
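For orientation, the address-space split described above can be read off the constants this patch introduces (VMALLOC_SIZE and VMALLOC_END in pgtable.h, PHBS_IO_BASE and IMALLOC_BASE in imalloc.h). The standalone C sketch below is illustrative only and not part of the patch; it simply prints the resulting ranges inside the 0xD kernel region:

    #include <stdio.h>

    /* Constants as defined by this patch in pgtable.h and imalloc.h. */
    #define VMALLOC_START 0xD000000000000000ul
    #define VMALLOC_SIZE  0x10000000000ul               /* vmalloc keeps the lower 1TB */
    #define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
    #define PHBS_IO_BASE  VMALLOC_END                   /* io mappings start above vmalloc */
    #define IMALLOC_BASE  (PHBS_IO_BASE + 0x80000000ul) /* 2GB reserved for PHBs */

    int main(void)
    {
            printf("vmalloc : 0x%016lx - 0x%016lx\n", VMALLOC_START, VMALLOC_END);
            printf("PHB io  : 0x%016lx - 0x%016lx\n", PHBS_IO_BASE, IMALLOC_BASE);
            printf("imalloc : 0x%016lx - ...\n", IMALLOC_BASE);
            return 0;
    }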
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/ppc64/kernel/eeh.c
@@ -505,7 +505,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
 	pte_t *ptep;
 	unsigned long pa;
 
-	ptep = find_linux_pte(ioremap_mm.pgd, token);
+	ptep = find_linux_pte(init_mm.pgd, token);
 	if (!ptep)
 		return token;
 	pa = pte_pfn(*ptep) << PAGE_SHIFT;
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -2121,10 +2121,6 @@ empty_zero_page:
 swapper_pg_dir:
 	.space	4096
 
-	.globl	ioremap_dir
-ioremap_dir:
-	.space	4096
-
 #ifdef CONFIG_SMP
 /* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
 	.globl	stab_array
--- a/arch/ppc64/kernel/process.c
+++ b/arch/ppc64/kernel/process.c
@@ -58,14 +58,6 @@ struct task_struct *last_task_used_math = NULL;
 struct task_struct *last_task_used_altivec = NULL;
 #endif
 
-struct mm_struct ioremap_mm = {
-	.pgd		= ioremap_dir,
-	.mm_users	= ATOMIC_INIT(2),
-	.mm_count	= ATOMIC_INIT(1),
-	.cpu_vm_mask	= CPU_MASK_ALL,
-	.page_table_lock = SPIN_LOCK_UNLOCKED,
-};
-
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -310,10 +310,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 
 		vsid = get_vsid(mm->context.id, ea);
 		break;
-	case IO_REGION_ID:
-		mm = &ioremap_mm;
-		vsid = get_kernel_vsid(ea);
-		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -15,6 +15,7 @@
 #include <asm/pgtable.h>
 #include <asm/semaphore.h>
 #include <asm/imalloc.h>
+#include <asm/cacheflush.h>
 
 static DECLARE_MUTEX(imlist_sem);
 struct vm_struct * imlist = NULL;
@@ -285,29 +286,32 @@ struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
 	return area;
 }
 
-unsigned long im_free(void * addr)
+void im_free(void * addr)
 {
 	struct vm_struct **p, *tmp;
-	unsigned long ret_size = 0;
 
 	if (!addr)
-		return ret_size;
-	if ((PAGE_SIZE-1) & (unsigned long) addr) {
+		return;
+	if ((unsigned long) addr & ~PAGE_MASK) {
 		printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
-		return ret_size;
+		return;
 	}
 	down(&imlist_sem);
 	for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
 		if (tmp->addr == addr) {
-			ret_size = tmp->size;
 			*p = tmp->next;
+
+			/* XXX: do we need the lock? */
+			spin_lock(&init_mm.page_table_lock);
+			unmap_vm_area(tmp);
+			spin_unlock(&init_mm.page_table_lock);
+
 			kfree(tmp);
 			up(&imlist_sem);
-			return ret_size;
+			return;
 		}
 	}
 	up(&imlist_sem);
 	printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
 			addr);
-	return ret_size;
 }
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -73,9 +73,6 @@ static unsigned long phbs_io_bot = PHBS_IO_BASE;
 extern pgd_t swapper_pg_dir[];
 extern struct task_struct *current_set[NR_CPUS];
 
-extern pgd_t ioremap_dir[];
-pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir;
-
 unsigned long klimit = (unsigned long)_end;
 
 unsigned long _SDR1=0;
@@ -137,69 +134,6 @@ void iounmap(volatile void __iomem *addr)
 
 #else
 
-static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
-			      unsigned long end)
-{
-	pte_t *pte;
-
-	pte = pte_offset_kernel(pmd, addr);
-	do {
-		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
-		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-}
-
-static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
-				     unsigned long end)
-{
-	pmd_t *pmd;
-	unsigned long next;
-
-	pmd = pmd_offset(pud, addr);
-	do {
-		next = pmd_addr_end(addr, end);
-		if (pmd_none_or_clear_bad(pmd))
-			continue;
-		unmap_im_area_pte(pmd, addr, next);
-	} while (pmd++, addr = next, addr != end);
-}
-
-static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
-				     unsigned long end)
-{
-	pud_t *pud;
-	unsigned long next;
-
-	pud = pud_offset(pgd, addr);
-	do {
-		next = pud_addr_end(addr, end);
-		if (pud_none_or_clear_bad(pud))
-			continue;
-		unmap_im_area_pmd(pud, addr, next);
-	} while (pud++, addr = next, addr != end);
-}
-
-static void unmap_im_area(unsigned long addr, unsigned long end)
-{
-	struct mm_struct *mm = &ioremap_mm;
-	unsigned long next;
-	pgd_t *pgd;
-
-	spin_lock(&mm->page_table_lock);
-
-	pgd = pgd_offset_i(addr);
-	flush_cache_vunmap(addr, end);
-	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			continue;
-		unmap_im_area_pud(pgd, addr, next);
-	} while (pgd++, addr = next, addr != end);
-	flush_tlb_kernel_range(start, end);
-
-	spin_unlock(&mm->page_table_lock);
-}
-
 /*
  * map_io_page currently only called by __ioremap
  * map_io_page adds an entry to the ioremap page table
@@ -214,21 +148,21 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 	unsigned long vsid;
 
 	if (mem_init_done) {
-		spin_lock(&ioremap_mm.page_table_lock);
-		pgdp = pgd_offset_i(ea);
-		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
+		spin_lock(&init_mm.page_table_lock);
+		pgdp = pgd_offset_k(ea);
+		pudp = pud_alloc(&init_mm, pgdp, ea);
 		if (!pudp)
 			return -ENOMEM;
-		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
+		pmdp = pmd_alloc(&init_mm, pudp, ea);
 		if (!pmdp)
 			return -ENOMEM;
-		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
+		ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
 		if (!ptep)
 			return -ENOMEM;
 		pa = abs_to_phys(pa);
-		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 					      __pgprot(flags)));
-		spin_unlock(&ioremap_mm.page_table_lock);
+		spin_unlock(&init_mm.page_table_lock);
 	} else {
 		unsigned long va, vpn, hash, hpteg;
 
@@ -267,13 +201,9 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 
 	for (i = 0; i < size; i += PAGE_SIZE)
 		if (map_io_page(ea+i, pa+i, flags))
-			goto failure;
+			return NULL;
 
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
- failure:
-	if (mem_init_done)
-		unmap_im_area(ea, ea + size);
-	return NULL;
 }
 
 
@@ -381,19 +311,14 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
  */
 void iounmap(volatile void __iomem *token)
 {
-	unsigned long address, size;
 	void *addr;
 
 	if (!mem_init_done)
 		return;
 
 	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
 
-	if ((size = im_free(addr)) == 0)
-		return;
-
-	address = (unsigned long)addr;
-	unmap_im_area(address, address + size);
+	im_free(addr);
 }
 
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
--- a/include/asm-ppc64/imalloc.h
+++ b/include/asm-ppc64/imalloc.h
@@ -4,9 +4,9 @@
 /*
  * Define the address range of the imalloc VM area.
  */
-#define PHBS_IO_BASE	IOREGIONBASE
-#define IMALLOC_BASE	(IOREGIONBASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
-#define IMALLOC_END	(IOREGIONBASE + EADDR_MASK)
+#define PHBS_IO_BASE	VMALLOC_END
+#define IMALLOC_BASE	(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
+#define IMALLOC_END	(VMALLOC_START + EADDR_MASK)
 
 
 /* imalloc region types */
@@ -18,7 +18,9 @@
 
 extern struct vm_struct * im_get_free_area(unsigned long size);
 extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
 			int region_type);
-unsigned long im_free(void *addr);
+extern void im_free(void *addr);
 
+extern unsigned long ioremap_bot;
+
 #endif /* _PPC64_IMALLOC_H */
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -202,9 +202,7 @@ extern u64 ppc64_pft_size;	/* Log 2 of page table size */
 #define PAGE_OFFSET     ASM_CONST(0xC000000000000000)
 #define KERNELBASE      PAGE_OFFSET
 #define VMALLOCBASE     ASM_CONST(0xD000000000000000)
-#define IOREGIONBASE    ASM_CONST(0xE000000000000000)
 
-#define IO_REGION_ID       (IOREGIONBASE >> REGION_SHIFT)
 #define VMALLOC_REGION_ID  (VMALLOCBASE >> REGION_SHIFT)
 #define KERNEL_REGION_ID   (KERNELBASE >> REGION_SHIFT)
 #define USER_REGION_ID     (0UL)
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -53,7 +53,8 @@
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_END   (VMALLOC_START + EADDR_MASK)
+#define VMALLOC_SIZE  (0x10000000000UL)
+#define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
 
 /*
  * Bits in a linux-style PTE.  These match the bits in the
@@ -239,9 +240,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address)	pgd_offset(&init_mm, address)
 
-/* to find an entry in the ioremap page-table-directory */
-#define pgd_offset_i(address)	(ioremap_pgd + pgd_index(address))
-
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -459,15 +457,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
 
-extern unsigned long ioremap_bot, ioremap_base;
-
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
 
 extern pgd_t swapper_pg_dir[];
-extern pgd_t ioremap_dir[];
 
 extern void paging_init(void);
 
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -429,16 +429,6 @@ struct thread_struct {
 	.fpexc_mode	= MSR_FE0|MSR_FE1, \
 }
 
-/*
- * Note: the vm_start and vm_end fields here should *not*
- * be in kernel space.  (Could vm_end == vm_start perhaps?)
- */
-#define IOREMAP_MMAP { &ioremap_mm, 0, 0x1000, NULL, \
-		       PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, \
-		       1, NULL, NULL }
-
-extern struct mm_struct ioremap_mm;
-
 /*
  * Return saved PC of a blocked thread. For now, this is the "user" PC
  */