#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
[PATCH] spinlock consolidation
This patch (written by me and also containing many suggestions from Arjan van
de Ven) does a major cleanup of the spinlock code. It does the following
things:
- consolidates and enhances the spinlock/rwlock debugging code
- simplifies the asm/spinlock.h files
- encapsulates the raw spinlock type and moves generic spinlock
  features (such as ->break_lock) into the generic code (see the
  sketch after this list).
- cleans up the spinlock code hierarchy to get rid of the spaghetti.
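The raw-type encapsulation means the generic spinlock_t now wraps the
architecture's raw lock and carries the generic fields itself; roughly
(a sketch of the linux/spinlock_types.h layout, trimmed for brevity):

        typedef struct {
                raw_spinlock_t raw_lock;
        #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
                unsigned int break_lock;
        #endif
        #ifdef CONFIG_DEBUG_SPINLOCK
                unsigned int magic, owner_cpu;
                void *owner;
        #endif
        } spinlock_t;

so ->break_lock and the debug fields exist once, in generic code, instead
of being re-declared by every architecture.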
Most notably, there's now only a single variant of the debugging code,
located in lib/spinlock_debug.c. (Previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds.)
Also, I've enhanced the rwlock debugging facility: it now tracks
write-owners. There is new spinlock-owner/CPU-tracking on SMP builds too.
All locks have lockup detection now, which will work for both soft and hard
spin/rwlock lockups.
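To illustrate the lockup detection: the debug spin loop tries the raw lock
for roughly a second's worth of iterations, then reports a suspected lockup
once and keeps spinning. A minimal sketch along the lines of
lib/spinlock_debug.c (simplified; the real code also records lock owners):

        static void __spin_lock_debug(spinlock_t *lock)
        {
                int print_once = 1;
                u64 i;

                for (;;) {
                        for (i = 0; i < loops_per_jiffy * HZ; i++) {
                                cpu_relax();
                                if (__raw_spin_trylock(&lock->raw_lock))
                                        return;
                        }
                        /* lockup suspected: report once, keep spinning */
                        if (print_once) {
                                print_once = 0;
                                printk("BUG: spinlock lockup on CPU#%d, %s/%d\n",
                                        raw_smp_processor_id(),
                                        current->comm, current->pid);
                                dump_stack();
                        }
                }
        }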
The arch-level include files now only contain the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:
include/asm-i386/spinlock_types.h | 16
include/asm-x86_64/spinlock_types.h | 16
I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:
   SMP                         |  UP
   ----------------------------|-----------------------------------
   asm/spinlock_types_smp.h    |  linux/spinlock_types_up.h
   linux/spinlock_types.h      |  linux/spinlock_types.h
   asm/spinlock_smp.h          |  linux/spinlock_up.h
   linux/spinlock_api_smp.h    |  linux/spinlock_api_up.h
   linux/spinlock.h            |  linux/spinlock.h
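After the split, an architecture's spinlock_types header reduces to little
more than the raw types and their initializers. A sketch modelled on the
i386 version of this series:

        #ifndef __ASM_SPINLOCK_TYPES_H
        #define __ASM_SPINLOCK_TYPES_H

        typedef struct {
                volatile unsigned int slock;
        } raw_spinlock_t;

        #define __RAW_SPIN_LOCK_UNLOCKED        { 1 }

        typedef struct {
                volatile unsigned int lock;
        } raw_rwlock_t;

        #define __RAW_RW_LOCK_UNLOCKED          { RW_LOCK_BIAS }

        #endif

The generic headers then layer the rest on top, as the comment below
(from linux/spinlock.h) describes.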
/*
* here's the role of the various spinlock/rwlock related include files:
*
* on SMP builds:
*
* asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
*
* linux/spinlock_api_smp.h:
* contains the prototypes for the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*
* on UP builds:
*
* linux/spinlock_types_up.h:
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* linux/spinlock_up.h:
* contains the __raw_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
* (included on UP-non-debug builds:)
*
* linux/spinlock_api_up.h:
* builds the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*/
All SMP and UP architectures are converted by this patch.
arm, i386, ia64, ppc, ppc64, s390/s390x and x64 were build-tested via
cross-compilers. m32r, mips, sh and sparc have not been tested yet, but
should be mostly fine.
From: Grant Grundler <grundler@parisc-linux.org>
Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
Builds 32-bit SMP kernel (not booted or tested). I did not try to build
non-SMP kernels. That should be trivial to fix up later if necessary.
I converted the bit-ops atomic_hash lock to raw_spinlock_t. Doing so avoids
some ugly nesting of linux/*.h and asm/*.h files. Those particular locks
are well tested and contained entirely inside arch-specific code. I do NOT
expect any new issues to arise with them.
If someone does ever need to use debug/metrics with them, then they will
need to unravel this hairball between spinlocks, atomic ops, and bit ops
that exists only because parisc has exactly one atomic instruction: LDCW
(load and clear word).
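For reference, a simplified sketch of how parisc builds a spinlock out of
LDCW (modelled on asm-parisc/spinlock.h; LDCW atomically loads a word and
clears it, and the word must be 16-byte aligned, hence the padded array):

        typedef struct {
                volatile unsigned int lock[4];  /* one aligned word inside */
        } raw_spinlock_t;

        /* pick the 16-byte-aligned word within the padded array */
        #define __ldcw_align(a) ({                                      \
                unsigned long __ret = (unsigned long) &(a)->lock[0];    \
                __ret = (__ret + 15) & ~15UL;                           \
                (volatile unsigned int *) __ret;                        \
        })

        /* atomically load the word and clear it: 1 = was unlocked */
        #define __ldcw(a) ({                                            \
                unsigned int __ret;                                     \
                __asm__ __volatile__("ldcw 0(%1),%0"                    \
                        : "=r" (__ret) : "r" (a) : "memory");           \
                __ret;                                                  \
        })

        static inline void __raw_spin_lock(raw_spinlock_t *x)
        {
                volatile unsigned int *a = __ldcw_align(x);

                while (__ldcw(a) == 0)          /* 0 => already held */
                        while (*a == 0)         /* spin read-only */
                                cpu_relax();
        }

        static inline void __raw_spin_unlock(raw_spinlock_t *x)
        {
                volatile unsigned int *a = __ldcw_align(x);

                *a = 1;                         /* plain store releases */
        }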
From: "Luck, Tony" <tony.luck@intel.com>
ia64 fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
#include <asm/cache.h>	/* for flush_user_dcache_range_asm() proto */
/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */
/* Cache flush operations */
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size));
extern void flush_cache_all_local(void);

static inline void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

static inline void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()

extern int parisc_cache_flush_threshold;
void parisc_setup_cache_timing(void);
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}
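/* Editorial sketch (not part of the original header): the threshold used
 * above is calibrated at boot by parisc_setup_cache_timing(), which
 * presumably times a whole-cache flush against a range flush using the
 * CR16 interval timer and picks the break-even size, along the lines of:
 *
 *	alltime = mfctl(16);
 *	flush_data_cache();
 *	alltime = mfctl(16) - alltime;
 *
 *	size = (unsigned long)(_end - _text);
 *	rangetime = mfctl(16);
 *	flush_kernel_dcache_range((unsigned long)_text, size);
 *	rangetime = mfctl(16) - rangetime;
 *
 *	parisc_cache_flush_threshold = size * alltime / rangetime;
 */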
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)
#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s,e)		do {		\
	flush_kernel_dcache_range_asm(s,e);		\
	flush_kernel_icache_range_asm(s,e);		\
} while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)
static inline void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}
/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pmd_t *pmd;
	pte_t pte;

	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/* We cannot take the pte lock here: flush_cache_page is usually
	 * called with pte lock already held.  Whereas flush_dcache_page
	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
	 * the vma itself is secure, but the pte might come or go racily.
	 */
	pte = *pte_offset_map(pmd, addr);
	/* But pte_unmap() does nothing on this architecture */

	/* Filter out coincidental file entries and swap entries */
	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
		return 0;

	return pte_pfn(pte) == pfn;
}
/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access tlb miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
		unsigned long vmaddr)
{
	/* save the current process space and pgd */
	unsigned long space = mfsp(3), pgd = mfctl(25);

	/* we don't mind taking interrupts since they may not
	 * do anything with user space, but we can't
	 * be preempted here */
	preempt_disable();

	/* make us current */
	mtctl(__pa(vma->vm_mm->pgd), 25);
	mtsp(vma->vm_mm->context, 3);

	flush_user_dcache_page(vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_page(vmaddr);

	/* put the old current process back */
	mtsp(space, 3);
	mtctl(pgd, 25);
	preempt_enable();
}
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (likely(vma->vm_mm->context == mfsp(3))) {
		flush_user_dcache_page(vmaddr);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_page(vmaddr);
	} else {
		flush_user_cache_page_non_current(vma, vmaddr);
	}
}
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (likely(translation_exists(vma, vmaddr, pfn)))
		__flush_cache_page(vma, vmaddr);
}
static inline void
flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_user_dcache_page(vmaddr);
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_asm(page_address(page));
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#endif /* _PARISC_CACHEFLUSH_H */