There are five MIPS32/64 architecture releases currently available: from 1 to 6, except the fourth one, which was intentionally skipped. Three of them can be called major: the 1st, 2nd and 6th, which not only have some system-level alterations, but also introduced significant core/ISA-level updates. The rest of the MIPS architecture releases are minor. Even though they don't have as many ISA/system/core-level changes as the major ones with respect to the previous releases, they still provide a set of updates (I'd say they were intended to be the intermediate releases before a major one) that might be useful for the kernel and user-level code, when activated by the kernel or compiler. In particular the following features were introduced or ended up being available at/after the MIPS32/64 Release 5 architecture: + the last release of the misaligned memory access instructions, + virtualisation - VZ ASE - is an optional component of the arch, + SIMD - MSA ASE - is an optional component of the arch, + DSP ASE is an optional component of the arch, + CP0.Status.FR=1 for CP1.FIR.F64=1 (pure 64-bit FPU general registers) must be available if an FPU is implemented, + CP1.FIR.Has2008 support is required so the CP1.FCSR.{ABS2008,NAN2008} bits are available, + UFR/UNFR aliases to access CP0.Status.FR from user-space by means of ctc1/cfc1 instructions (enabled by CP0.Config5.UFR), + CP0.Config5.LLB=1 and the eretnc instruction are implemented so that returning from an interrupt, exception, or error trap doesn't accidentally clear the LL-bit, + the XPA feature together with extended versions of CPx registers is introduced, which needs to have the mfhc0/mthc0 instructions available. So due to these changes GNU GCC provides extended instruction set support for MIPS32/64 Release 5 by default, like eretnc/mfhc0/mthc0. Even though the architecture alteration isn't that big, it is still worth being taken into account by the kernel software.
Finally we can't deny that some optimizations/limitations might be found in the future and implemented on some level in the kernel or compiler. In this case having even the intermediate MIPS architecture releases supported would be more than useful. So most of the changes provided by this commit can be split into either compile- or runtime-config related. The compile-time related changes are caused by adding the new CONFIG_CPU_MIPS32_R5/CONFIG_CPU_MIPSR5 configs and concern the code activating features already implemented for MIPSR2 or MIPSR6 (like eretnc/LLbit, mthc0/mfhc0). In addition CPU_HAS_MSA can now be freely enabled for MIPS32/64 Release 5 based platforms, as is done for CPU_MIPS32_R6 CPUs. The runtime changes concern the features which are handled with respect to the MIPS ISA revision detected at run-time by means of the CP0.Config.{AT,AR} bits. Alas these fields can only be used to detect the r1, r2 or r6 releases. But since we know which CPUs in fact support the R5 arch, we can manually set the MIPS_CPU_ISA_M32R5/MIPS_CPU_ISA_M64R5 bit of c->isa_level and then use cpu_has_mips32r5/cpu_has_mips64r5 where appropriate. Since XPA/EVA provide too complex alterations, and to have them usable with MIPS32 Release 2 charged kernels (for compatibility with current platform configs), they are left to be set up as separate kernel configs. Co-developed-by: Alexey Malahov <Alexey.Malahov@baikalelectronics.ru> Signed-off-by: Alexey Malahov <Alexey.Malahov@baikalelectronics.ru> Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Paul Burton <paulburton@kernel.org> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Rob Herring <robh+dt@kernel.org> Cc: devicetree@vger.kernel.org Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
329 lines
7.2 KiB
C
329 lines
7.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* Copyright (C) 2002 MontaVista Software Inc.
|
|
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
|
|
*/
|
|
#ifndef _ASM_FPU_H
|
|
#define _ASM_FPU_H
|
|
|
|
#include <linux/sched.h>
|
|
#include <linux/sched/task_stack.h>
|
|
#include <linux/ptrace.h>
|
|
#include <linux/thread_info.h>
|
|
#include <linux/bitops.h>
|
|
|
|
#include <asm/mipsregs.h>
|
|
#include <asm/cpu.h>
|
|
#include <asm/cpu-features.h>
|
|
#include <asm/fpu_emulator.h>
|
|
#include <asm/hazards.h>
|
|
#include <asm/ptrace.h>
|
|
#include <asm/processor.h>
|
|
#include <asm/current.h>
|
|
#include <asm/msa.h>
|
|
|
|
#ifdef CONFIG_MIPS_MT_FPAFF
|
|
#include <asm/mips_mt.h>
|
|
#endif
|
|
|
|
/*
 * This enum specifies a mode in which we want the FPU to operate, for cores
 * which implement the Status.FR bit. Note that the bottom bit of the value
 * purposefully matches the desired value of the Status.FR bit, which is
 * relied upon by __enable_fpu() via FPU_FR_MASK.
 */
enum fpu_mode {
	FPU_32BIT = 0,		/* FR = 0 */
	FPU_64BIT,		/* FR = 1, FRE = 0 */
	FPU_AS_IS,		/* enable FPU in its current mode, FR untouched */
	FPU_HYBRID,		/* FR = 1, FRE = 1 */

/* Extracts the desired Status.FR value from an fpu_mode value. */
#define FPU_FR_MASK	0x1
};
|
|
|
|
#ifdef CONFIG_MIPS_FP_SUPPORT
|
|
|
|
/* FP context save/restore helpers (defined outside this header). */
extern void _save_fp(struct task_struct *);
extern void _restore_fp(struct task_struct *);

/*
 * Disable the FPU by clearing the coprocessor 1 usable bit (Status.CU1),
 * then wait out the hazard so the new Status value takes effect before
 * any subsequent instruction.
 */
#define __disable_fpu()						\
do {								\
	clear_c0_status(ST0_CU1);				\
	disable_fpu_hazard();					\
} while (0)
|
|
|
|
/*
 * Enable the FPU in the requested mode, programming Status.FR (and, for
 * FPU_HYBRID, Config5.FRE) accordingly.
 *
 * Returns 0 on success, or SIGFPE if the requested mode isn't supported
 * by this CPU.
 */
static inline int __enable_fpu(enum fpu_mode mode)
{
	int fr;

	switch (mode) {
	case FPU_AS_IS:
		/* just enable the FPU in its current mode */
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();
		return 0;

	case FPU_HYBRID:
		if (!cpu_has_fre)
			return SIGFPE;

		/* set FRE */
		set_c0_config5(MIPS_CONF5_FRE);
		goto fr_common;

	case FPU_64BIT:
#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
      defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_64BIT))
		/* we only have a 32-bit FPU */
		return SIGFPE;
#endif
		fallthrough;
	case FPU_32BIT:
		if (cpu_has_fre) {
			/* clear FRE */
			clear_c0_config5(MIPS_CONF5_FRE);
		}
fr_common:
		/*
		 * Set CU1 & change FR appropriately; the bottom bit of the
		 * fpu_mode value is the desired Status.FR value (FPU_FR_MASK).
		 */
		fr = (int)mode & FPU_FR_MASK;
		change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0));
		enable_fpu_hazard();

		/* check FR has the desired value */
		if (!!(read_c0_status() & ST0_FR) == !!fr)
			return 0;

		/* unsupported FR value: back out and signal failure */
		__disable_fpu();
		return SIGFPE;

	default:
		BUG();
	}

	return SIGFPE;
}
|
|
|
|
/* Drop the current task's FPU-owner marking. */
#define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)

/* True if the current task is marked as the FPU owner. */
static inline int __is_fpu_owner(void)
{
	return test_thread_flag(TIF_USEDFPU);
}
|
|
|
|
static inline int is_fpu_owner(void)
|
|
{
|
|
return cpu_has_fpu && __is_fpu_owner();
|
|
}
|
|
|
|
static inline int __own_fpu(void)
|
|
{
|
|
enum fpu_mode mode;
|
|
int ret;
|
|
|
|
if (test_thread_flag(TIF_HYBRID_FPREGS))
|
|
mode = FPU_HYBRID;
|
|
else
|
|
mode = !test_thread_flag(TIF_32BIT_FPREGS);
|
|
|
|
ret = __enable_fpu(mode);
|
|
if (ret)
|
|
return ret;
|
|
|
|
KSTK_STATUS(current) |= ST0_CU1;
|
|
if (mode == FPU_64BIT || mode == FPU_HYBRID)
|
|
KSTK_STATUS(current) |= ST0_FR;
|
|
else /* mode == FPU_32BIT */
|
|
KSTK_STATUS(current) &= ~ST0_FR;
|
|
|
|
set_thread_flag(TIF_USEDFPU);
|
|
return 0;
|
|
}
|
|
|
|
static inline int own_fpu_inatomic(int restore)
|
|
{
|
|
int ret = 0;
|
|
|
|
if (cpu_has_fpu && !__is_fpu_owner()) {
|
|
ret = __own_fpu();
|
|
if (restore && !ret)
|
|
_restore_fp(current);
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
/*
 * Preemption-safe wrapper around own_fpu_inatomic().
 * Returns 0 on success or a signal number.
 */
static inline int own_fpu(int restore)
{
	int err;

	preempt_disable();
	err = own_fpu_inatomic(restore);
	preempt_enable();

	return err;
}
|
|
|
|
/*
 * Relinquish FPU/MSA ownership for @tsk, optionally saving the live
 * register state into the task's context first. Callers run this with
 * preemption disabled (see lose_fpu()).
 */
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	if (is_msa_enabled()) {
		if (save) {
			save_msa(tsk);
			/* also capture FCSR alongside the MSA vector state */
			tsk->thread.fpu.fcr31 =
				read_32bit_cp1_register(CP1_STATUS);
		}
		disable_msa();
		clear_tsk_thread_flag(tsk, TIF_USEDMSA);
		__disable_fpu();
	} else if (is_fpu_owner()) {
		if (save)
			_save_fp(tsk);
		__disable_fpu();
	} else {
		/* FPU should not have been left enabled with no owner */
		WARN(read_c0_status() & ST0_CU1,
		     "Orphaned FPU left enabled");
	}
	/* Reflect the disabled FPU in the saved Status word & thread flags. */
	KSTK_STATUS(tsk) &= ~ST0_CU1;
	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
}
|
|
|
|
/*
 * Preemption-safe wrapper around lose_fpu_inatomic() for the current task.
 */
static inline void lose_fpu(int save)
{
	preempt_disable();
	lose_fpu_inatomic(save, current);
	preempt_enable();
}
|
|
|
|
/**
 * init_fp_ctx() - Initialize task FP context
 * @target: The task whose FP context should be initialized.
 *
 * Initializes the FP context of the target task to sane default values if that
 * target task does not already have valid FP context. Once the context has
 * been initialized, the task will be marked as having used FP & thus having
 * valid FP context.
 *
 * Returns: true if context is initialized, else false.
 */
static inline bool init_fp_ctx(struct task_struct *target)
{
	/* If FP has been used then the target already has context */
	if (tsk_used_math(target))
		return false;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));

	/* FCSR has been preset by `mips_set_personality_nan'. */

	/*
	 * Record that the target has "used" math, such that the context
	 * just initialised, and any modifications made by the caller,
	 * aren't discarded.
	 */
	set_stopped_child_used_math(target);

	return true;
}
|
|
|
|
static inline void save_fp(struct task_struct *tsk)
|
|
{
|
|
if (cpu_has_fpu)
|
|
_save_fp(tsk);
|
|
}
|
|
|
|
static inline void restore_fp(struct task_struct *tsk)
|
|
{
|
|
if (cpu_has_fpu)
|
|
_restore_fp(tsk);
|
|
}
|
|
|
|
static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
|
|
{
|
|
if (tsk == current) {
|
|
preempt_disable();
|
|
if (is_fpu_owner())
|
|
_save_fp(current);
|
|
preempt_enable();
|
|
}
|
|
|
|
return tsk->thread.fpu.fpr;
|
|
}
|
|
|
|
#else /* !CONFIG_MIPS_FP_SUPPORT */
|
|
|
|
/*
|
|
* When FP support is disabled we provide only a minimal set of stub functions
|
|
* to avoid callers needing to care too much about CONFIG_MIPS_FP_SUPPORT.
|
|
*/
|
|
|
|
/* FP support compiled out: attempting to enable the FPU is an error. */
static inline int __enable_fpu(enum fpu_mode mode)
{
	return SIGILL;
}
|
|
|
|
/* FP support compiled out: nothing to disable. */
static inline void __disable_fpu(void)
{
	/* no-op */
}
|
|
|
|
|
|
/* FP support compiled out: no task can ever own the FPU. */
static inline int is_fpu_owner(void)
{
	return 0;
}
|
|
|
|
/* FP support compiled out: no ownership flag to clear. */
static inline void clear_fpu_owner(void)
{
	/* no-op */
}
|
|
|
|
/* FP support compiled out: taking FPU ownership is an error. */
static inline int own_fpu_inatomic(int restore)
{
	return SIGILL;
}
|
|
|
|
/* FP support compiled out: taking FPU ownership is an error. */
static inline int own_fpu(int restore)
{
	return SIGILL;
}
|
|
|
|
/* FP support compiled out: no FPU state to relinquish. */
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	/* no-op */
}
|
|
|
|
/* FP support compiled out: no FPU state to relinquish. */
static inline void lose_fpu(int save)
{
	/* no-op */
}
|
|
|
|
/* FP support compiled out: there is never FP context to initialize. */
static inline bool init_fp_ctx(struct task_struct *target)
{
	return false;
}
|
|
|
|
/*
|
|
* The following functions should only be called in paths where we know that FP
|
|
* support is enabled, typically a path where own_fpu() or __enable_fpu() have
|
|
* returned successfully. When CONFIG_MIPS_FP_SUPPORT=n it is known at compile
|
|
* time that this should never happen, so calls to these functions should be
|
|
* optimized away & never actually be emitted.
|
|
*/
|
|
|
|
extern void save_fp(struct task_struct *tsk)
|
|
__compiletime_error("save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
|
|
|
|
extern void _save_fp(struct task_struct *)
|
|
__compiletime_error("_save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
|
|
|
|
extern void restore_fp(struct task_struct *tsk)
|
|
__compiletime_error("restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
|
|
|
|
extern void _restore_fp(struct task_struct *)
|
|
__compiletime_error("_restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
|
|
|
|
extern union fpureg *get_fpu_regs(struct task_struct *tsk)
|
|
__compiletime_error("get_fpu_regs() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
|
|
|
|
#endif /* !CONFIG_MIPS_FP_SUPPORT */
|
|
#endif /* _ASM_FPU_H */
|