// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
        return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
                (sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
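/*
 * The segment table is organised as groups of eight SSTEs indexed by a
 * hash of the ESID; if the hashed group has no free entry, a victim is
 * chosen round-robin via ctx->sst_lru and cast out.
 */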
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
                                       struct copro_slb *slb)
{
        struct cxl_sste *primary, *sste, *ret = NULL;
        unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
        unsigned int entry;
        unsigned int hash;

        if (slb->vsid & SLB_VSID_B_1T)
                hash = (slb->esid >> SID_SHIFT_1T) & mask;
        else /* 256M */
                hash = (slb->esid >> SID_SHIFT) & mask;

        primary = ctx->sstp + (hash << 3);

        for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
                if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
                        ret = sste;
                if (sste_matches(sste, slb))
                        return NULL;
        }
        if (ret)
                return ret;

        /* Nothing free, select an entry to cast out */
        ret = primary + ctx->sst_lru;
        ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

        return ret;
}
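
/*
 * Install the translation described by @slb into the context's segment
 * table. The SSTE lock is taken so that concurrent faults do not race on
 * the same group of entries.
 */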
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
        /* find_free_sste() selects the entry within the hashed group of 8. */
        struct cxl_sste *sste;
        unsigned long flags;

        spin_lock_irqsave(&ctx->sste_lock, flags);
        sste = find_free_sste(ctx, slb);
        if (!sste)
                goto out_unlock;

        pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                        sste - ctx->sstp, slb->vsid, slb->esid);
        trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

        sste->vsid_data = cpu_to_be64(slb->vsid);
        sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
}
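
/*
 * Resolve a segment fault for effective address @ea: compute the SLB
 * contents with copro_calculate_slb() and, on success, load them into the
 * context's segment table.
 */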
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
                             u64 ea)
{
        struct copro_slb slb = {0, 0};
        int rc;

        if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
                cxl_load_segment(ctx, &slb);
        }

        return rc;
}
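
/*
 * Acknowledge the interrupt with an Address Error (AE), record the
 * faulting address and DSISR on the context so they can be reported to
 * the owner, and wake anyone waiting on the context's wait queue.
 */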
static void cxl_ack_ae(struct cxl_context *ctx)
{
        unsigned long flags;

        cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

        spin_lock_irqsave(&ctx->lock, flags);
        ctx->pending_fault = true;
        ctx->fault_addr = ctx->dar;
        ctx->fault_dsisr = ctx->dsisr;
        spin_unlock_irqrestore(&ctx->lock, flags);

        wake_up_all(&ctx->wq);
}

static int cxl_handle_segment_miss(struct cxl_context *ctx,
                                   struct mm_struct *mm, u64 ea)
{
        int rc;

        pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
        trace_cxl_ste_miss(ctx, ea);

        if ((rc = cxl_fault_segment(ctx, mm, ea)))
                cxl_ack_ae(ctx);
        else {
                mb(); /* Order seg table write to TFC MMIO write */
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }

        return IRQ_HANDLED;
}
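
/*
 * Resolve a page fault on behalf of the accelerator: fault the page in
 * through copro_handle_mm_fault() and, when the radix MMU is not in use,
 * preload the hash page table entry with hash_page_mm(), since the normal
 * update_mmu_cache() path does not run for these faults.
 */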
int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
        vm_fault_t flt = 0;
        int result;
        unsigned long access, flags, inv_flags = 0;

        /*
         * Add the fault handling cpu to task mm cpumask so that we
         * can do a safe lockless page table walk when inserting the
         * hash page table entry. This function gets called with a
         * valid mm for user space addresses, so the if (mm) check
         * is sufficient here.
         */
        if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
                /*
                 * We need to make sure we walk the table only after
                 * we update the cpumask. The other side of the barrier
                 * is explained in serialize_against_pte_lookup().
                 */
                smp_mb();
        }
        if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
                pr_devel("copro_handle_mm_fault failed: %#x\n", result);
                return result;
        }

        if (!radix_enabled()) {
                /*
                 * update_mmu_cache() will not have loaded the hash since
                 * current->trap is not a 0x400 or 0x300, so just call
                 * hash_page_mm() here.
                 */
                access = _PAGE_PRESENT | _PAGE_READ;
                if (dsisr & CXL_PSL_DSISR_An_S)
                        access |= _PAGE_WRITE;

                if (!mm && (get_region_id(dar) != USER_REGION_ID))
                        access |= _PAGE_PRIVILEGED;

                if (dsisr & DSISR_NOHPTE)
                        inv_flags |= HPTE_NOHPTE_UPDATE;

                local_irq_save(flags);
                hash_page_mm(mm, dar, access, 0x300, inv_flags);
                local_irq_restore(flags);
        }
        return 0;
}
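
/*
 * Bottom-half handling of a data storage fault: resolve it with
 * cxl_handle_mm_fault() and then either restart the translation
 * (TFC_An_R) or report an address error back to the AFU.
 */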
static void cxl_handle_page_fault(struct cxl_context *ctx,
                                  struct mm_struct *mm,
                                  u64 dsisr, u64 dar)
{
        trace_cxl_pte_miss(ctx, dsisr, dar);

        if (cxl_handle_mm_fault(mm, dsisr, dar)) {
                cxl_ack_ae(ctx);
        } else {
                pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }
}

/*
 * Returns the mm_struct corresponding to the context ctx, with a reference
 * held. If mm_users is already zero, the context may be in the process of
 * being closed, so NULL is returned instead.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
        if (ctx->mm == NULL)
                return NULL;

        if (!mmget_not_zero(ctx->mm))
                return NULL;

        return ctx->mm;
}

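/*
 * Decode the PSL DSISR: POWER8 reports segment misses and page faults in
 * separate bits, while on POWER9 every translation fault is handled as a
 * page fault.
 */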
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
        if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS))
                return true;

        return false;
}

static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
        if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DM))
                return true;

        if (cxl_is_power9())
                return true;

        return false;
}

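/*
 * Work-queue bottom half for PSL translation interrupts: re-validate the
 * fault registers (bare-metal only), take a reference on the context's mm
 * for user contexts, then dispatch to the segment-miss or page-fault
 * handler.
 */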
void cxl_handle_fault(struct work_struct *fault_work)
{
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
        struct mm_struct *mm = NULL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
                        /* Most likely explanation is harmless - a dedicated
                         * process has detached and these were cleared by the
                         * PSL purge, but warn about it just in case
                         */
                        dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
                        return;
                }
        }

        /* Early return if the context is being / has been detached */
        if (ctx->status == CLOSED) {
                cxl_ack_ae(ctx);
                return;
        }

        pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
                 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

        if (!ctx->kernel) {
                mm = get_mem_context(ctx);
                if (mm == NULL) {
                        pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
                                 __func__, ctx->pe, pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        return;
                } else {
                        pr_devel("Handling page fault for pe=%d pid=%i\n",
                                 ctx->pe, pid_nr(ctx->pid));
                }
        }

        if (cxl_is_segment_miss(ctx, dsisr))
                cxl_handle_segment_miss(ctx, mm, dar);
        else if (cxl_is_page_fault(ctx, dsisr))
                cxl_handle_page_fault(ctx, mm, dsisr, dar);
        else
                WARN(1, "cxl_handle_fault has nothing to handle\n");

        if (mm)
                mmput(mm);
}
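
/*
 * Pre-load the segment table entry covering effective address @ea so the
 * AFU's first access to it does not take a segment miss interrupt.
 */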
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_one unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        cxl_fault_segment(ctx, mm, ea);

        mmput(mm);
}
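
/*
 * Return the first effective address of the segment following @ea, using
 * the segment size (256M or 1T) encoded in @vsid.
 */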
static u64 next_segment(u64 ea, u64 vsid)
{
        if (vsid & SLB_VSID_B_1T)
                ea |= (1ULL << 40) - 1;
        else
                ea |= (1ULL << 28) - 1;

        return ea + 1;
}
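
/*
 * Walk every VMA of the context's mm and pre-load a segment table entry
 * for each segment it touches, skipping a segment when it matches the one
 * just loaded.
 */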
static void cxl_prefault_vma(struct cxl_context *ctx)
{
        u64 ea, last_esid = 0;
        struct copro_slb slb;
        struct vm_area_struct *vma;
        int rc;
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_vm unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        mmap_read_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                for (ea = vma->vm_start; ea < vma->vm_end;
                                ea = next_segment(ea, slb.vsid)) {
                        rc = copro_calculate_slb(mm, ea, &slb);
                        if (rc)
                                continue;

                        if (last_esid == slb.esid)
                                continue;

                        cxl_load_segment(ctx, &slb);
                        last_esid = slb.esid;
                }
        }
        mmap_read_unlock(mm);

        mmput(mm);
}
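
/*
 * Optional prefaulting at context start-up, controlled by the AFU's
 * prefault_mode setting: pre-load either the single segment named by the
 * work element descriptor or every segment mapped by the process.
 */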
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
        switch (ctx->afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                cxl_prefault_one(ctx, wed);
                break;
        case CXL_PREFAULT_ALL:
                cxl_prefault_vma(ctx);
                break;
        default:
                break;
        }
}