[PATCH] include/asm-sh64/: "extern inline" -> "static inline"
"extern inline" doesn't make much sense. Signed-off-by: Adrian Bunk <bunk@stusta.de> Cc: Paul Mundt <lethal@linux-sh.org> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit ca5ed2f5c7
parent e0795cf46d
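Background for the change (not part of the original patch): under GCC's traditional GNU89 rules, an "extern inline" definition in a header is used only for inlining and never emits an out-of-line copy, so any call the compiler declines to inline becomes an unresolved symbol at link time; under C99 the meaning is roughly reversed and every file including the header can emit an external definition, inviting duplicate-symbol errors. "static inline" behaves the same under both dialects, giving each translation unit its own private copy. The sketch below is a minimal illustration of the preferred form, using a hypothetical header and a made-up stand-in for __pa():

/* example.h -- minimal illustrative sketch, not taken from the patch */
#ifndef EXAMPLE_H
#define EXAMPLE_H

/*
 * Written as "extern __inline__", this definition would emit no
 * out-of-line code under GNU89 rules (link errors for any call that
 * is not inlined) and an external definition in every includer under
 * C99 rules (duplicate symbols).  "static inline" avoids both: each
 * translation unit gets its own copy.
 */
static inline unsigned long example_virt_to_phys(volatile void *address)
{
        /* hypothetical stand-in for the kernel's __pa() */
        return (unsigned long)address;
}

#endif /* EXAMPLE_H */

Even in a build where nothing is inlined, every object file carries its own static copy, so the link never fails.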
@@ -143,12 +143,12 @@ extern unsigned long pciio_virt;
  * Change virtual addresses to physical addresses and vv.
  * These are trivial on the 1:1 Linux/SuperH mapping
  */
-extern __inline__ unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
 {
 	return __pa(address);
 }

-extern __inline__ void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
 {
 	return __va(address);
 }
@@ -156,12 +156,12 @@ extern __inline__ void * phys_to_virt(unsigned long address)
 extern void * __ioremap(unsigned long phys_addr, unsigned long size,
 			unsigned long flags);

-extern __inline__ void * ioremap(unsigned long phys_addr, unsigned long size)
+static inline void * ioremap(unsigned long phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, 1);
 }

-extern __inline__ void * ioremap_nocache (unsigned long phys_addr, unsigned long size)
+static inline void * ioremap_nocache (unsigned long phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, 0);
 }

@@ -50,7 +50,7 @@ extern pgd_t *mmu_pdtp_cache;
  */
 #define MMU_VPN_MASK	0xfffff000

-extern __inline__ void
+static inline void
 get_new_mmu_context(struct mm_struct *mm)
 {
 	extern void flush_tlb_all(void);

@@ -38,14 +38,14 @@ static inline void pgd_init(unsigned long page)
  * if any.
  */

-extern __inline__ pgd_t *get_pgd_slow(void)
+static inline pgd_t *get_pgd_slow(void)
 {
 	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
 	pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
 	return ret;
 }

-extern __inline__ pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;

@@ -62,14 +62,14 @@ extern __inline__ pgd_t *get_pgd_fast(void)
 	return (pgd_t *)ret;
 }

-extern __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
 {
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
 	pgd_quicklist = (unsigned long *) pgd;
 	pgtable_cache_size++;
 }

-extern __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
 {
 	kfree((void *)pgd);
 }
@@ -77,7 +77,7 @@ extern __inline__ void free_pgd_slow(pgd_t *pgd)
 extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
 extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);

-extern __inline__ pte_t *get_pte_fast(void)
+static inline pte_t *get_pte_fast(void)
 {
 	unsigned long *ret;

@@ -89,7 +89,7 @@ extern __inline__ pte_t *get_pte_fast(void)
 	return (pte_t *)ret;
 }

-extern __inline__ void free_pte_fast(pte_t *pte)
+static inline void free_pte_fast(pte_t *pte)
 {
 	*(unsigned long *)pte = (unsigned long) pte_quicklist;
 	pte_quicklist = (unsigned long *) pte;
@@ -167,7 +167,7 @@ static __inline__ void pmd_free(pmd_t *pmd)

 extern int do_check_pgt_cache(int, int);

-extern inline void set_pgdir(unsigned long address, pgd_t entry)
+static inline void set_pgdir(unsigned long address, pgd_t entry)
 {
 	struct task_struct * p;
 	pgd_t *pgd;

@@ -421,18 +421,18 @@ static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
 static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }

-extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
-extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
-extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
-extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
+static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }

-extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
-extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
-extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
+static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
+static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }


 /*
@@ -456,7 +456,7 @@ extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _
 #define mk_pte_phys(physpage, pgprot) \
 ({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 { set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

 typedef pte_t *pte_addr_t;

@@ -228,7 +228,7 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
  * FPU lazy state save handling.
  */

-extern __inline__ void release_fpu(void)
+static inline void release_fpu(void)
 {
 	unsigned long long __dummy;

@@ -240,7 +240,7 @@ extern __inline__ void release_fpu(void)
 			     : "r" (SR_FD));
 }

-extern __inline__ void grab_fpu(void)
+static inline void grab_fpu(void)
 {
 	unsigned long long __dummy;


@@ -132,7 +132,7 @@ static __inline__ void local_irq_disable(void)
 	       (flags != 0);			\
 })

-extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 {
 	unsigned long flags, retval;

@@ -143,7 +143,7 @@ extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
 	return retval;
 }

-extern __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
+static inline unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
 {
 	unsigned long flags, retval;


@@ -20,7 +20,7 @@ extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 }

@@ -287,7 +287,7 @@ __sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count);
  */
 extern long __strnlen_user(const char *__s, long __n);

-extern __inline__ long strnlen_user(const char *s, long n)
+static inline long strnlen_user(const char *s, long n)
 {
 	if (!__addr_ok(s))
 		return 0;