/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PROCESSOR_H
#define _ASM_RISCV_PROCESSOR_H

#include <linux/const.h>
#include <linux/cache.h>

#include <vdso/processor.h>

#include <asm/ptrace.h>
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)

#define STACK_TOP		TASK_SIZE

#ifdef CONFIG_64BIT
#define STACK_TOP_MAX		TASK_SIZE_64
#else
#define STACK_TOP_MAX		TASK_SIZE
#endif
/* Kernel/user stack alignment required by the RISC-V psABI. */
#define STACK_ALIGN		16

#ifndef __ASSEMBLY__

struct task_struct;
struct pt_regs;
/* CPU-specific state of a task */
struct thread_struct {
/* Callee-saved registers */
unsigned long ra ;
unsigned long sp ; /* Kernel mode stack */
unsigned long s [ 12 ] ; /* s[0]: frame pointer */
struct __riscv_d_ext_state fstate ;
2020-12-17 19:01:44 +03:00
unsigned long bad_cause ;
2023-06-05 14:07:18 +03:00
unsigned long vstate_ctrl ;
2023-06-05 14:07:07 +03:00
struct __riscv_v_ext_state vstate ;
2017-07-11 04:04:30 +03:00
} ;
2021-07-02 07:54:21 +03:00
/*
 * Hardened-usercopy whitelist: only the FP register state (fstate)
 * inside thread_struct may be copied to/from user space.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*size = sizeof_field(struct thread_struct, fstate);
	*offset = offsetof(struct thread_struct, fstate);
}
2017-07-11 04:04:30 +03:00
#define INIT_THREAD {					\
	.sp = sizeof(init_stack) + (long)&init_stack,	\
}

/*
 * User registers saved at kernel entry sit at the very top of the
 * task's kernel stack, aligned to STACK_ALIGN.
 */
#define task_pt_regs(tsk)						\
	((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE		\
			    - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))

#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->epc)
#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->sp)


/* Do necessary setup to start up a newly executed thread. */
extern void start_thread(struct pt_regs *regs,
			unsigned long pc, unsigned long sp);

extern unsigned long __get_wchan(struct task_struct *p);
2017-07-11 04:04:30 +03:00
/* Idle the hart until an interrupt (or other wakeup event) arrives. */
static inline void wait_for_interrupt(void)
{
	__asm__ __volatile__ ("wfi");
}
struct device_node ;
2022-05-27 08:17:42 +03:00
int riscv_of_processor_hartid ( struct device_node * node , unsigned long * hartid ) ;
2023-06-07 23:28:26 +03:00
int riscv_early_of_processor_hartid ( struct device_node * node , unsigned long * hartid ) ;
2022-05-27 08:17:42 +03:00
int riscv_of_parent_hartid ( struct device_node * node , unsigned long * hartid ) ;
2017-07-11 04:04:30 +03:00
extern void riscv_fill_hwcap ( void ) ;
2021-03-05 14:33:32 +03:00
extern int arch_dup_task_struct ( struct task_struct * dst , struct task_struct * src ) ;
2017-07-11 04:04:30 +03:00
2023-06-05 14:07:12 +03:00
extern unsigned long signal_minsigstksz __ro_after_init ;
2023-06-05 14:07:18 +03:00
# ifdef CONFIG_RISCV_ISA_V
/* Userspace interface for PR_RISCV_V_{SET,GET}_VS prctl()s: */
# define RISCV_V_SET_CONTROL(arg) riscv_v_vstate_ctrl_set_current(arg)
# define RISCV_V_GET_CONTROL() riscv_v_vstate_ctrl_get_current()
extern long riscv_v_vstate_ctrl_set_current ( unsigned long arg ) ;
extern long riscv_v_vstate_ctrl_get_current ( void ) ;
# endif /* CONFIG_RISCV_ISA_V */
2017-07-11 04:04:30 +03:00
# endif /* __ASSEMBLY__ */
# endif /* _ASM_RISCV_PROCESSOR_H */