// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/cacheinfo.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscalls.h>

#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/loongarch.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
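
/*
 * Install the cache error exception vector (except_vec_cex) as the
 * machine error handler.
 */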
void cache_error_setup(void)
{
	extern char __weak except_vec_cex;
	set_merr_handler(0x0, &except_vec_cex, 0x80);
}

/*
 * LoongArch maintains ICache/DCache coherency in hardware,
 * so we only need an "ibar" here to avoid instruction hazards.
 */
void local_flush_icache_range(unsigned long start, unsigned long end)
{
	asm volatile ("\tibar 0\n"::);
}
EXPORT_SYMBOL(local_flush_icache_range);
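
/*
 * Flush one cache leaf by set/way indexing: the low bits of the index
 * address select the way, and each set is stepped by one line size.
 * A shared (non-private) cache is flushed on every NUMA node by
 * advancing the index into each node's address space window.
 */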
static void flush_cache_leaf(unsigned int leaf)
{
	int i, j, nr_nodes;
	uint64_t addr = CSR_DMW0_BASE;
	struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;

	nr_nodes = cache_private(cdesc) ? 1 : loongson_sysconf.nr_nodes;

	do {
		for (i = 0; i < cdesc->sets; i++) {
			for (j = 0; j < cdesc->ways; j++) {
				flush_cache_line(leaf, addr);
				addr++;
			}

			addr -= cdesc->ways;
			addr += cdesc->linesz;
		}
		addr += (1ULL << NODE_ADDRSPACE_SHIFT);
	} while (--nr_nodes > 0);
}
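
/*
 * If the last-level cache is inclusive, flushing it also flushes
 * everything below it; otherwise every present leaf must be flushed
 * individually.
 */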
asmlinkage __visible void __flush_cache_all(void)
{
	int leaf;
	struct cache_desc *cdesc = current_cpu_data.cache_leaves;
	unsigned int cache_present = current_cpu_data.cache_leaves_present;

	leaf = cache_present - 1;
	if (cache_inclusive(cdesc + leaf)) {
		flush_cache_leaf(leaf);
		return;
	}

	for (leaf = 0; leaf < cache_present; leaf++)
		flush_cache_leaf(leaf);
}
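
/*
 * Cache hierarchy descriptor bits in CPUCFG16: bits 0-2 describe L1
 * (IU present, IU unified, D present), then each higher level takes
 * the next 7-bit group (see the shifts in cpu_cache_init() below).
 */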
#define L1IUPRE		(1 << 0)
#define L1IUUNIFY	(1 << 1)
#define L1DPRE		(1 << 2)

#define LXIUPRE		(1 << 0)
#define LXIUUNIFY	(1 << 1)
#define LXIUPRIV	(1 << 2)
#define LXIUINCL	(1 << 3)
#define LXDPRE		(1 << 4)
#define LXDPRIV		(1 << 5)
#define LXDINCL		(1 << 6)
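
/*
 * Fill in one cache leaf: the present/private/inclusive flags come
 * from the CPUCFG16 group in cfg0, the geometry (ways, sets, line
 * size) from CPUCFG17 + leaf. This is a macro rather than a function
 * because it advances the caller's cdesc and leaf variables.
 */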
#define populate_cache_properties(cfg0, cdesc, level, leaf)				\
do {											\
	unsigned int cfg1;								\
											\
	cfg1 = read_cpucfg(LOONGARCH_CPUCFG17 + leaf);					\
	if (level == 1) {								\
		cdesc->flags |= CACHE_PRIVATE;						\
	} else {									\
		if (cfg0 & LXIUPRIV)							\
			cdesc->flags |= CACHE_PRIVATE;					\
		if (cfg0 & LXIUINCL)							\
			cdesc->flags |= CACHE_INCLUSIVE;				\
	}										\
	cdesc->level = level;								\
	cdesc->flags |= CACHE_PRESENT;							\
	cdesc->ways = ((cfg1 & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1;		\
	cdesc->sets = 1 << ((cfg1 & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS);	\
	cdesc->linesz = 1 << ((cfg1 & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE);	\
	cdesc++; leaf++;								\
} while (0)
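
/*
 * Probe every cache leaf described by CPUCFG16; any mix of I + D,
 * I + D + V, I + D + S and I + D + V + S hierarchies is handled.
 */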
void cpu_cache_init(void)
{
	unsigned int leaf = 0, level = 1;
	unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16);
	struct cache_desc *cdesc = current_cpu_data.cache_leaves;

	if (config & L1IUPRE) {
		if (config & L1IUUNIFY)
			cdesc->type = CACHE_TYPE_UNIFIED;
		else
			cdesc->type = CACHE_TYPE_INST;
		populate_cache_properties(config, cdesc, level, leaf);
	}

	if (config & L1DPRE) {
		cdesc->type = CACHE_TYPE_DATA;
		populate_cache_properties(config, cdesc, level, leaf);
	}

	config = config >> 3;
	for (level = 2; level <= CACHE_LEVEL_MAX; level++) {
		if (!config)
			break;

		if (config & LXIUPRE) {
			if (config & LXIUUNIFY)
				cdesc->type = CACHE_TYPE_UNIFIED;
			else
				cdesc->type = CACHE_TYPE_INST;
			populate_cache_properties(config, cdesc, level, leaf);
		}

		if (config & LXDPRE) {
			cdesc->type = CACHE_TYPE_DATA;
			populate_cache_properties(config, cdesc, level, leaf);
		}
		config = config >> 7;
	}

	BUG_ON(leaf > CACHE_LEAVES_MAX);

	current_cpu_data.cache_leaves_present = leaf;
	current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;

	shm_align_mask = PAGE_SIZE - 1;
}
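
/*
 * Page protection bits for each VM_* permission combination, consumed
 * by the generic vm_get_page_prot() via DECLARE_VM_GET_PAGE_PROT.
 * Only shared writable mappings set _PAGE_WRITE up front; private
 * writable mappings take a fault first for copy-on-write.
 */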
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= __pgprot(_CACHE_CC | _PAGE_USER |
								   _PAGE_PROTNONE | _PAGE_NO_EXEC |
								   _PAGE_NO_READ),
	[VM_READ]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_WRITE]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_WRITE | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_EXEC]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_WRITE]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED]					= __pgprot(_CACHE_CC | _PAGE_USER |
								   _PAGE_PROTNONE | _PAGE_NO_EXEC |
								   _PAGE_NO_READ),
	[VM_SHARED | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_SHARED | VM_WRITE]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC | _PAGE_WRITE),
	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC | _PAGE_WRITE),
	[VM_SHARED | VM_EXEC]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_WRITE),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_WRITE)
};

DECLARE_VM_GET_PAGE_PROT