LoongArch: Make the CPUCFG&CSR ops simple aliases of compiler built-ins

In addition to reducing visual clutter, this also makes Clang happy
regarding the const-ness of arguments. In the original approach, all
Clang gets to see are the incoming function parameters, whose const-ness
cannot be proven until the wrapper is inlined; so Clang errors out here
while GCC is fine.
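
For illustration only (not part of this patch), a minimal sketch of the
pattern, assuming a LoongArch toolchain whose <larchintrin.h> provides
__csrrd_d() and requires its CSR-number argument to be a compile-time
constant; the CSR number 0x6 and the helper read_some_csr() are made up
for the example:

  #include <larchintrin.h>

  /* New form: a plain textual alias, so the caller's constant CSR number
   * reaches the built-in directly and both GCC and Clang can verify it. */
  #define csr_read64(reg) __csrrd_d(reg)

  /*
   * Old form, for contrast: GCC only checks the built-in's argument after
   * inlining and constant folding, while Clang checks the parameter `reg`
   * itself and rejects it as non-constant.
   *
   * static __always_inline u64 csr_read64(u32 reg)
   * {
   *	return __csrrd_d(reg);
   * }
   */

  unsigned long read_some_csr(void)
  {
  	return csr_read64(0x6);	/* constant CSR number, illustrative only */
  }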

While at it, tweak several printk format strings because the return type
of csr_read64 becomes effectively unsigned long, instead of unsigned
long long.
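
As a plain-C illustration (not kernel code) of why the format strings
change: with the macro alias, csr_read64() now yields __csrrd_d()'s
unsigned long rather than an unsigned long long, and -Wformat flags a
%llx specifier that is fed an unsigned long; the value below is
arbitrary:

  #include <stdio.h>

  int main(void)
  {
  	unsigned long merrera = 0x1234abcdUL;	/* arbitrary example value */

  	/* %lx matches unsigned long; the old %llx would now trip -Wformat. */
  	printf("csr_merrera == %016lx\n", merrera);
  	return 0;
  }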

Signed-off-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
WANG Xuerui authored on 2023-06-29 20:58:43 +08:00; committed by Huacai Chen
commit 53a4858ccd, parent 38bb46f945
3 changed files with 15 additions and 56 deletions


@@ -56,10 +56,7 @@ __asm__(".macro parse_r var r\n\t"
 #undef _IFC_REG
 /* CPUCFG */
-static inline u32 read_cpucfg(u32 reg)
-{
-	return __cpucfg(reg);
-}
+#define read_cpucfg(reg) __cpucfg(reg)
 #endif /* !__ASSEMBLY__ */
@@ -206,56 +203,18 @@ static inline u32 read_cpucfg(u32 reg)
 #ifndef __ASSEMBLY__
 /* CSR */
-static __always_inline u32 csr_read32(u32 reg)
-{
-	return __csrrd_w(reg);
-}
-static __always_inline u64 csr_read64(u32 reg)
-{
-	return __csrrd_d(reg);
-}
-static __always_inline void csr_write32(u32 val, u32 reg)
-{
-	__csrwr_w(val, reg);
-}
-static __always_inline void csr_write64(u64 val, u32 reg)
-{
-	__csrwr_d(val, reg);
-}
-static __always_inline u32 csr_xchg32(u32 val, u32 mask, u32 reg)
-{
-	return __csrxchg_w(val, mask, reg);
-}
-static __always_inline u64 csr_xchg64(u64 val, u64 mask, u32 reg)
-{
-	return __csrxchg_d(val, mask, reg);
-}
+#define csr_read32(reg) __csrrd_w(reg)
+#define csr_read64(reg) __csrrd_d(reg)
+#define csr_write32(val, reg) __csrwr_w(val, reg)
+#define csr_write64(val, reg) __csrwr_d(val, reg)
+#define csr_xchg32(val, mask, reg) __csrxchg_w(val, mask, reg)
+#define csr_xchg64(val, mask, reg) __csrxchg_d(val, mask, reg)
 /* IOCSR */
-static __always_inline u32 iocsr_read32(u32 reg)
-{
-	return __iocsrrd_w(reg);
-}
-static __always_inline u64 iocsr_read64(u32 reg)
-{
-	return __iocsrrd_d(reg);
-}
-static __always_inline void iocsr_write32(u32 val, u32 reg)
-{
-	__iocsrwr_w(val, reg);
-}
-static __always_inline void iocsr_write64(u64 val, u32 reg)
-{
-	__iocsrwr_d(val, reg);
-}
+#define iocsr_read32(reg) __iocsrrd_w(reg)
+#define iocsr_read64(reg) __iocsrrd_d(reg)
+#define iocsr_write32(val, reg) __iocsrwr_w(val, reg)
+#define iocsr_write64(val, reg) __iocsrwr_d(val, reg)
 #endif /* !__ASSEMBLY__ */


@@ -924,7 +924,7 @@ asmlinkage void cache_parity_error(void)
 	/* For the moment, report the problem and hang. */
 	pr_err("Cache error exception:\n");
 	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
-	pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA));
+	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
 	panic("Can't handle the cache error!");
 }


@@ -20,9 +20,9 @@ void dump_tlb_regs(void)
 	pr_info("Index : 0x%0x\n", read_csr_tlbidx());
 	pr_info("PageSize : 0x%0x\n", read_csr_pagesize());
-	pr_info("EntryHi : 0x%0*llx\n", field, read_csr_entryhi());
-	pr_info("EntryLo0 : 0x%0*llx\n", field, read_csr_entrylo0());
-	pr_info("EntryLo1 : 0x%0*llx\n", field, read_csr_entrylo1());
+	pr_info("EntryHi : 0x%0*lx\n", field, read_csr_entryhi());
+	pr_info("EntryLo0 : 0x%0*lx\n", field, read_csr_entrylo0());
+	pr_info("EntryLo1 : 0x%0*lx\n", field, read_csr_entrylo1());
 }
 static void dump_tlb(int first, int last)