/* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
 *
 * include/asm-sh/cache.h
 *
 * Copyright 1999 (C) Niibe Yutaka
 * Copyright 2002, 2003 (C) Paul Mundt
 */
#ifndef __ASM_SH_CACHE_H
#define __ASM_SH_CACHE_H
#ifdef __KERNEL__
#include <linux/init.h>
#include <asm/cpu/cache.h>

#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

#ifndef __ASSEMBLY__
/*
 * Describes the geometry of one CPU cache and the parameters needed
 * for memory-mapped cache array operations and alias handling.
 * NOTE(review): presumably populated at boot by
 * detect_cpu_and_cache_system() — confirm against the cpu probe code.
 */
struct cache_info {
	unsigned int ways;		/* Number of cache ways */
	unsigned int sets;		/* Number of cache sets */
	unsigned int linesz;		/* Cache line size (bytes) */

	unsigned int way_size;		/* sets * line size */

	/*
	 * way_incr is the address offset for accessing the next way
	 * in memory mapped cache array ops.
	 */
	unsigned int way_incr;

	unsigned int entry_shift;
	unsigned int entry_mask;

	/*
	 * Compute a mask which selects the address bits which overlap between
	 * 1. those used to select the cache set during indexing
	 * 2. those in the physical page number.
	 */
	unsigned int alias_mask;
	unsigned int n_aliases;		/* Number of aliases */

	unsigned long flags;
};

int __init detect_cpu_and_cache_system(void);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */