/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
2005-12-06 06:52:24 +03:00
# undef DEBUG
2005-11-15 23:53:48 +03:00
# include <linux/interrupt.h>
# include <linux/list.h>
# include <linux/module.h>
# include <linux/ptrace.h>
# include <linux/slab.h>
# include <linux/wait.h>
2006-11-23 02:46:49 +03:00
# include <linux/mm.h>
# include <linux/io.h>
2006-03-26 13:37:14 +04:00
# include <linux/mutex.h>
2007-07-17 15:05:52 +04:00
# include <linux/linux_logo.h>
2005-11-15 23:53:48 +03:00
# include <asm/spu.h>
2006-06-19 22:33:29 +04:00
# include <asm/spu_priv1.h>
2006-10-24 20:31:27 +04:00
# include <asm/xmon.h>
2007-07-20 23:39:46 +04:00
# include <asm/prom.h>
# include "spu_priv1_mmio.h"
2005-11-15 23:53:48 +03:00
2006-11-23 02:46:49 +03:00
const struct spu_management_ops * spu_management_ops ;
2007-04-23 23:08:29 +04:00
EXPORT_SYMBOL_GPL ( spu_management_ops ) ;
2006-06-19 22:33:29 +04:00
const struct spu_priv1_ops * spu_priv1_ops ;
2007-03-10 02:05:37 +03:00
static LIST_HEAD ( spu_full_list ) ;
static DEFINE_MUTEX ( spu_mutex ) ;
2007-04-29 20:10:39 +04:00
static DEFINE_SPINLOCK ( spu_list_lock ) ;
2007-03-10 02:05:37 +03:00
2006-06-19 22:33:29 +04:00
EXPORT_SYMBOL_GPL ( spu_priv1_ops ) ;
2007-03-10 02:05:37 +03:00
void spu_invalidate_slbs ( struct spu * spu )
{
struct spu_priv2 __iomem * priv2 = spu - > priv2 ;
if ( spu_mfc_sr1_get ( spu ) & MFC_STATE1_RELOCATE_MASK )
out_be64 ( & priv2 - > slb_invalidate_all_W , 0UL ) ;
}
EXPORT_SYMBOL_GPL ( spu_invalidate_slbs ) ;
/* This is called by the MM core when a segment size is changed, to
* request a flush of all the SPEs using a given mm
*/
void spu_flush_all_slbs ( struct mm_struct * mm )
{
struct spu * spu ;
unsigned long flags ;
spin_lock_irqsave ( & spu_list_lock , flags ) ;
list_for_each_entry ( spu , & spu_full_list , full_list ) {
if ( spu - > mm = = mm )
spu_invalidate_slbs ( spu ) ;
}
spin_unlock_irqrestore ( & spu_list_lock , flags ) ;
}
/* The hack below stinks... try to do something better one of
* these days . . . Does it even work properly with NR_CPUS = = 1 ?
*/
static inline void mm_needs_global_tlbie ( struct mm_struct * mm )
{
int nr = ( NR_CPUS > 1 ) ? NR_CPUS : NR_CPUS + 1 ;
/* Global TLBIE broadcast required with SPEs. */
__cpus_setall ( & mm - > cpu_vm_mask , nr ) ;
}
void spu_associate_mm ( struct spu * spu , struct mm_struct * mm )
{
unsigned long flags ;
spin_lock_irqsave ( & spu_list_lock , flags ) ;
spu - > mm = mm ;
spin_unlock_irqrestore ( & spu_list_lock , flags ) ;
if ( mm )
mm_needs_global_tlbie ( mm ) ;
}
EXPORT_SYMBOL_GPL ( spu_associate_mm ) ;
2005-11-15 23:53:48 +03:00
static int __spu_trap_invalid_dma ( struct spu * spu )
{
pr_debug ( " %s \n " , __FUNCTION__ ) ;
2006-10-04 19:26:14 +04:00
spu - > dma_callback ( spu , SPE_EVENT_INVALID_DMA ) ;
2005-11-15 23:53:48 +03:00
return 0 ;
}
static int __spu_trap_dma_align ( struct spu * spu )
{
pr_debug ( " %s \n " , __FUNCTION__ ) ;
2006-10-04 19:26:14 +04:00
spu - > dma_callback ( spu , SPE_EVENT_DMA_ALIGNMENT ) ;
2005-11-15 23:53:48 +03:00
return 0 ;
}
static int __spu_trap_error ( struct spu * spu )
{
pr_debug ( " %s \n " , __FUNCTION__ ) ;
2006-10-04 19:26:14 +04:00
spu - > dma_callback ( spu , SPE_EVENT_SPE_ERROR ) ;
2005-11-15 23:53:48 +03:00
return 0 ;
}
static void spu_restart_dma ( struct spu * spu )
{
struct spu_priv2 __iomem * priv2 = spu - > priv2 ;
2005-11-15 23:53:49 +03:00
2006-01-04 22:31:28 +03:00
if ( ! test_bit ( SPU_CONTEXT_SWITCH_PENDING , & spu - > flags ) )
2005-11-15 23:53:49 +03:00
out_be64 ( & priv2 - > mfc_control_RW , MFC_CNTL_RESTART_DMA_COMMAND ) ;
2005-11-15 23:53:48 +03:00
}
static int __spu_trap_data_seg ( struct spu * spu , unsigned long ea )
{
2005-11-15 23:53:52 +03:00
struct spu_priv2 __iomem * priv2 = spu - > priv2 ;
struct mm_struct * mm = spu - > mm ;
2006-06-19 22:33:23 +04:00
u64 esid , vsid , llp ;
2007-03-10 02:05:37 +03:00
int psize ;
2005-11-15 23:53:48 +03:00
pr_debug ( " %s \n " , __FUNCTION__ ) ;
2006-01-04 22:31:28 +03:00
if ( test_bit ( SPU_CONTEXT_SWITCH_ACTIVE , & spu - > flags ) ) {
2005-11-15 23:53:52 +03:00
/* SLBs are pre-loaded for context switch, so
* we should never get here !
*/
2005-11-15 23:53:49 +03:00
printk ( " %s: invalid access during switch! \n " , __func__ ) ;
return 1 ;
}
2006-10-24 20:31:18 +04:00
esid = ( ea & ESID_MASK ) | SLB_ESID_V ;
switch ( REGION_ID ( ea ) ) {
case USER_REGION_ID :
2007-05-08 10:27:27 +04:00
# ifdef CONFIG_PPC_MM_SLICES
psize = get_slice_psize ( mm , ea ) ;
# else
psize = mm - > context . user_psize ;
2006-10-24 20:31:18 +04:00
# endif
vsid = ( get_vsid ( mm - > context . id , ea ) < < SLB_VSID_SHIFT ) |
2007-03-10 02:05:37 +03:00
SLB_VSID_USER ;
2006-10-24 20:31:18 +04:00
break ;
case VMALLOC_REGION_ID :
2007-03-10 02:05:37 +03:00
if ( ea < VMALLOC_END )
psize = mmu_vmalloc_psize ;
else
psize = mmu_io_psize ;
2006-10-24 20:31:18 +04:00
vsid = ( get_kernel_vsid ( ea ) < < SLB_VSID_SHIFT ) |
2007-03-10 02:05:37 +03:00
SLB_VSID_KERNEL ;
2006-10-24 20:31:18 +04:00
break ;
case KERNEL_REGION_ID :
2007-03-10 02:05:37 +03:00
psize = mmu_linear_psize ;
2006-10-24 20:31:18 +04:00
vsid = ( get_kernel_vsid ( ea ) < < SLB_VSID_SHIFT ) |
2007-03-10 02:05:37 +03:00
SLB_VSID_KERNEL ;
2006-10-24 20:31:18 +04:00
break ;
default :
2005-11-15 23:53:52 +03:00
/* Future: support kernel segments so that drivers
* can use SPUs .
*/
2005-11-15 23:53:48 +03:00
pr_debug ( " invalid region access at %016lx \n " , ea ) ;
return 1 ;
}
2007-03-10 02:05:37 +03:00
llp = mmu_psize_defs [ psize ] . sllp ;
2005-11-15 23:53:48 +03:00
2005-11-15 23:53:52 +03:00
out_be64 ( & priv2 - > slb_index_W , spu - > slb_replace ) ;
2007-03-10 02:05:37 +03:00
out_be64 ( & priv2 - > slb_vsid_RW , vsid | llp ) ;
2005-11-15 23:53:52 +03:00
out_be64 ( & priv2 - > slb_esid_RW , esid ) ;
spu - > slb_replace + + ;
2005-11-15 23:53:48 +03:00
if ( spu - > slb_replace > = 8 )
spu - > slb_replace = 0 ;
spu_restart_dma ( spu ) ;
2007-06-29 04:58:03 +04:00
spu - > stats . slb_flt + + ;
2005-11-15 23:53:48 +03:00
return 0 ;
}
2005-11-15 23:53:49 +03:00
extern int hash_page ( unsigned long ea , unsigned long access , unsigned long trap ) ; //XXX
2005-11-15 23:53:52 +03:00
static int __spu_trap_data_map ( struct spu * spu , unsigned long ea , u64 dsisr )
2005-11-15 23:53:48 +03:00
{
2006-03-23 02:00:11 +03:00
pr_debug ( " %s, %lx, %lx \n " , __FUNCTION__ , dsisr , ea ) ;
2005-11-15 23:53:48 +03:00
2005-11-15 23:53:49 +03:00
/* Handle kernel space hash faults immediately.
User hash faults need to be deferred to process context . */
if ( ( dsisr & MFC_DSISR_PTE_NOT_FOUND )
& & REGION_ID ( ea ) ! = USER_REGION_ID
& & hash_page ( ea , _PAGE_PRESENT , 0x300 ) = = 0 ) {
spu_restart_dma ( spu ) ;
return 0 ;
}
2006-01-04 22:31:28 +03:00
if ( test_bit ( SPU_CONTEXT_SWITCH_ACTIVE , & spu - > flags ) ) {
2005-11-15 23:53:49 +03:00
printk ( " %s: invalid access during switch! \n " , __func__ ) ;
return 1 ;
}
2005-11-15 23:53:48 +03:00
2005-11-15 23:53:52 +03:00
spu - > dar = ea ;
spu - > dsisr = dsisr ;
mb ( ) ;
2006-06-19 22:33:33 +04:00
spu - > stop_callback ( spu ) ;
2005-11-15 23:53:48 +03:00
return 0 ;
}
static irqreturn_t
2006-10-07 00:52:16 +04:00
spu_irq_class_0 ( int irq , void * data )
2005-11-15 23:53:48 +03:00
{
struct spu * spu ;
spu = data ;
spu - > class_0_pending = 1 ;
2006-06-19 22:33:33 +04:00
spu - > stop_callback ( spu ) ;
2005-11-15 23:53:48 +03:00
return IRQ_HANDLED ;
}
2005-12-06 06:52:25 +03:00
int
2005-11-15 23:53:48 +03:00
spu_irq_class_0_bottom ( struct spu * spu )
{
2005-12-06 06:52:27 +03:00
unsigned long stat , mask ;
2007-01-12 03:52:41 +03:00
unsigned long flags ;
2005-11-15 23:53:48 +03:00
spu - > class_0_pending = 0 ;
2007-01-12 03:52:41 +03:00
spin_lock_irqsave ( & spu - > register_lock , flags ) ;
2006-01-04 22:31:30 +03:00
mask = spu_int_mask_get ( spu , 0 ) ;
stat = spu_int_stat_get ( spu , 0 ) ;
2005-11-15 23:53:48 +03:00
2005-12-06 06:52:27 +03:00
stat & = mask ;
2006-06-23 22:57:50 +04:00
if ( stat & 1 ) /* invalid DMA alignment */
2005-11-15 23:53:48 +03:00
__spu_trap_dma_align ( spu ) ;
2006-06-23 22:57:50 +04:00
if ( stat & 2 ) /* invalid MFC DMA */
__spu_trap_invalid_dma ( spu ) ;
2005-11-15 23:53:48 +03:00
if ( stat & 4 ) /* error on SPU */
__spu_trap_error ( spu ) ;
2006-01-04 22:31:30 +03:00
spu_int_stat_clear ( spu , 0 , stat ) ;
2007-01-12 03:52:41 +03:00
spin_unlock_irqrestore ( & spu - > register_lock , flags ) ;
2005-12-06 06:52:25 +03:00
return ( stat & 0x7 ) ? - EIO : 0 ;
2005-11-15 23:53:48 +03:00
}
2005-12-06 06:52:25 +03:00
EXPORT_SYMBOL_GPL ( spu_irq_class_0_bottom ) ;
2005-11-15 23:53:48 +03:00
static irqreturn_t
2006-10-07 00:52:16 +04:00
spu_irq_class_1 ( int irq , void * data )
2005-11-15 23:53:48 +03:00
{
struct spu * spu ;
2005-11-15 23:53:52 +03:00
unsigned long stat , mask , dar , dsisr ;
2005-11-15 23:53:48 +03:00
spu = data ;
2005-11-15 23:53:52 +03:00
/* atomically read & clear class1 status. */
spin_lock ( & spu - > register_lock ) ;
2006-01-04 22:31:30 +03:00
mask = spu_int_mask_get ( spu , 1 ) ;
stat = spu_int_stat_get ( spu , 1 ) & mask ;
dar = spu_mfc_dar_get ( spu ) ;
dsisr = spu_mfc_dsisr_get ( spu ) ;
2005-12-09 21:04:18 +03:00
if ( stat & 2 ) /* mapping fault */
2006-01-04 22:31:30 +03:00
spu_mfc_dsisr_set ( spu , 0ul ) ;
spu_int_stat_clear ( spu , 1 , stat ) ;
2005-11-15 23:53:52 +03:00
spin_unlock ( & spu - > register_lock ) ;
2006-03-23 02:00:11 +03:00
pr_debug ( " %s: %lx %lx %lx %lx \n " , __FUNCTION__ , mask , stat ,
dar , dsisr ) ;
2005-11-15 23:53:48 +03:00
if ( stat & 1 ) /* segment fault */
__spu_trap_data_seg ( spu , dar ) ;
if ( stat & 2 ) { /* mapping fault */
2005-11-15 23:53:52 +03:00
__spu_trap_data_map ( spu , dar , dsisr ) ;
2005-11-15 23:53:48 +03:00
}
if ( stat & 4 ) /* ls compare & suspend on get */
;
if ( stat & 8 ) /* ls compare & suspend on put */
;
return stat ? IRQ_HANDLED : IRQ_NONE ;
}
static irqreturn_t
2006-10-07 00:52:16 +04:00
spu_irq_class_2 ( int irq , void * data )
2005-11-15 23:53:48 +03:00
{
struct spu * spu ;
unsigned long stat ;
2005-12-06 06:52:27 +03:00
unsigned long mask ;
2005-11-15 23:53:48 +03:00
spu = data ;
2006-06-19 22:33:33 +04:00
spin_lock ( & spu - > register_lock ) ;
2006-01-04 22:31:30 +03:00
stat = spu_int_stat_get ( spu , 2 ) ;
mask = spu_int_mask_get ( spu , 2 ) ;
2006-06-19 22:33:33 +04:00
/* ignore interrupts we're not waiting for */
stat & = mask ;
/*
* mailbox interrupts ( 0x1 and 0x10 ) are level triggered .
* mask them now before acknowledging .
*/
if ( stat & 0x11 )
spu_int_mask_and ( spu , 2 , ~ ( stat & 0x11 ) ) ;
/* acknowledge all interrupts before the callbacks */
spu_int_stat_clear ( spu , 2 , stat ) ;
spin_unlock ( & spu - > register_lock ) ;
2005-11-15 23:53:48 +03:00
2005-12-06 06:52:27 +03:00
pr_debug ( " class 2 interrupt %d, %lx, %lx \n " , irq , stat , mask ) ;
2005-11-15 23:53:48 +03:00
if ( stat & 1 ) /* PPC core mailbox */
2006-06-19 22:33:33 +04:00
spu - > ibox_callback ( spu ) ;
2005-11-15 23:53:48 +03:00
if ( stat & 2 ) /* SPU stop-and-signal */
2006-06-19 22:33:33 +04:00
spu - > stop_callback ( spu ) ;
2005-11-15 23:53:48 +03:00
if ( stat & 4 ) /* SPU halted */
2006-06-19 22:33:33 +04:00
spu - > stop_callback ( spu ) ;
2005-11-15 23:53:48 +03:00
if ( stat & 8 ) /* DMA tag group complete */
2006-06-19 22:33:33 +04:00
spu - > mfc_callback ( spu ) ;
2005-11-15 23:53:48 +03:00
if ( stat & 0x10 ) /* SPU mailbox threshold */
2006-06-19 22:33:33 +04:00
spu - > wbox_callback ( spu ) ;
2005-11-15 23:53:48 +03:00
2007-06-29 04:58:03 +04:00
spu - > stats . class2_intr + + ;
2005-11-15 23:53:48 +03:00
return stat ? IRQ_HANDLED : IRQ_NONE ;
}
2006-07-03 15:36:01 +04:00
static int spu_request_irqs ( struct spu * spu )
2005-11-15 23:53:48 +03:00
{
2006-07-03 15:36:01 +04:00
int ret = 0 ;
2005-11-15 23:53:48 +03:00
2006-07-03 15:36:01 +04:00
if ( spu - > irqs [ 0 ] ! = NO_IRQ ) {
snprintf ( spu - > irq_c0 , sizeof ( spu - > irq_c0 ) , " spe%02d.0 " ,
spu - > number ) ;
ret = request_irq ( spu - > irqs [ 0 ] , spu_irq_class_0 ,
IRQF_DISABLED ,
spu - > irq_c0 , spu ) ;
if ( ret )
goto bail0 ;
}
if ( spu - > irqs [ 1 ] ! = NO_IRQ ) {
snprintf ( spu - > irq_c1 , sizeof ( spu - > irq_c1 ) , " spe%02d.1 " ,
spu - > number ) ;
ret = request_irq ( spu - > irqs [ 1 ] , spu_irq_class_1 ,
IRQF_DISABLED ,
spu - > irq_c1 , spu ) ;
if ( ret )
goto bail1 ;
}
if ( spu - > irqs [ 2 ] ! = NO_IRQ ) {
snprintf ( spu - > irq_c2 , sizeof ( spu - > irq_c2 ) , " spe%02d.2 " ,
spu - > number ) ;
ret = request_irq ( spu - > irqs [ 2 ] , spu_irq_class_2 ,
IRQF_DISABLED ,
spu - > irq_c2 , spu ) ;
if ( ret )
goto bail2 ;
}
return 0 ;
2005-11-15 23:53:48 +03:00
2006-07-03 15:36:01 +04:00
bail2 :
if ( spu - > irqs [ 1 ] ! = NO_IRQ )
free_irq ( spu - > irqs [ 1 ] , spu ) ;
bail1 :
if ( spu - > irqs [ 0 ] ! = NO_IRQ )
free_irq ( spu - > irqs [ 0 ] , spu ) ;
bail0 :
2005-11-15 23:53:48 +03:00
return ret ;
}
2006-07-03 15:36:01 +04:00
static void spu_free_irqs ( struct spu * spu )
2005-11-15 23:53:48 +03:00
{
2006-07-03 15:36:01 +04:00
if ( spu - > irqs [ 0 ] ! = NO_IRQ )
free_irq ( spu - > irqs [ 0 ] , spu ) ;
if ( spu - > irqs [ 1 ] ! = NO_IRQ )
free_irq ( spu - > irqs [ 1 ] , spu ) ;
if ( spu - > irqs [ 2 ] ! = NO_IRQ )
free_irq ( spu - > irqs [ 2 ] , spu ) ;
2005-11-15 23:53:48 +03:00
}
static void spu_init_channels ( struct spu * spu )
{
static const struct {
unsigned channel ;
unsigned count ;
} zero_list [ ] = {
{ 0x00 , 1 , } , { 0x01 , 1 , } , { 0x03 , 1 , } , { 0x04 , 1 , } ,
{ 0x18 , 1 , } , { 0x19 , 1 , } , { 0x1b , 1 , } , { 0x1d , 1 , } ,
} , count_list [ ] = {
{ 0x00 , 0 , } , { 0x03 , 0 , } , { 0x04 , 0 , } , { 0x15 , 16 , } ,
{ 0x17 , 1 , } , { 0x18 , 0 , } , { 0x19 , 0 , } , { 0x1b , 0 , } ,
{ 0x1c , 1 , } , { 0x1d , 0 , } , { 0x1e , 1 , } ,
} ;
2006-01-04 22:31:31 +03:00
struct spu_priv2 __iomem * priv2 ;
2005-11-15 23:53:48 +03:00
int i ;
priv2 = spu - > priv2 ;
/* initialize all channel data to zero */
for ( i = 0 ; i < ARRAY_SIZE ( zero_list ) ; i + + ) {
int count ;
out_be64 ( & priv2 - > spu_chnlcntptr_RW , zero_list [ i ] . channel ) ;
for ( count = 0 ; count < zero_list [ i ] . count ; count + + )
out_be64 ( & priv2 - > spu_chnldata_RW , 0 ) ;
}
/* initialize channel counts to meaningful values */
for ( i = 0 ; i < ARRAY_SIZE ( count_list ) ; i + + ) {
out_be64 ( & priv2 - > spu_chnlcntptr_RW , count_list [ i ] . channel ) ;
out_be64 ( & priv2 - > spu_chnlcnt_RW , count_list [ i ] . count ) ;
}
}
2006-10-04 19:26:12 +04:00
struct spu * spu_alloc_node ( int node )
2005-11-15 23:53:48 +03:00
{
2006-10-04 19:26:12 +04:00
struct spu * spu = NULL ;
2005-11-15 23:53:48 +03:00
2006-03-26 13:37:14 +04:00
mutex_lock ( & spu_mutex ) ;
2007-07-20 23:39:44 +04:00
if ( ! list_empty ( & cbe_spu_info [ node ] . free_spus ) ) {
spu = list_entry ( cbe_spu_info [ node ] . free_spus . next , struct spu ,
list ) ;
2005-11-15 23:53:48 +03:00
list_del_init ( & spu - > list ) ;
2006-10-24 20:31:15 +04:00
pr_debug ( " Got SPU %d %d \n " , spu - > number , spu - > node ) ;
2005-11-15 23:53:48 +03:00
}
2006-03-26 13:37:14 +04:00
mutex_unlock ( & spu_mutex ) ;
2005-11-15 23:53:48 +03:00
2007-04-23 23:08:14 +04:00
if ( spu )
spu_init_channels ( spu ) ;
2006-10-04 19:26:12 +04:00
return spu ;
}
EXPORT_SYMBOL_GPL ( spu_alloc_node ) ;
struct spu * spu_alloc ( void )
{
struct spu * spu = NULL ;
int node ;
for ( node = 0 ; node < MAX_NUMNODES ; node + + ) {
spu = spu_alloc_node ( node ) ;
if ( spu )
break ;
}
2005-11-15 23:53:48 +03:00
return spu ;
}
void spu_free ( struct spu * spu )
{
2006-03-26 13:37:14 +04:00
mutex_lock ( & spu_mutex ) ;
2007-07-20 23:39:44 +04:00
list_add_tail ( & spu - > list , & cbe_spu_info [ spu - > node ] . free_spus ) ;
2006-03-26 13:37:14 +04:00
mutex_unlock ( & spu_mutex ) ;
2005-11-15 23:53:48 +03:00
}
2005-12-06 06:52:21 +03:00
EXPORT_SYMBOL_GPL ( spu_free ) ;
2005-11-15 23:53:48 +03:00
2007-06-16 01:17:32 +04:00
static int spu_shutdown ( struct sys_device * sysdev )
{
struct spu * spu = container_of ( sysdev , struct spu , sysdev ) ;
spu_free_irqs ( spu ) ;
spu_destroy_spu ( spu ) ;
return 0 ;
}
2006-06-19 22:33:19 +04:00
struct sysdev_class spu_sysdev_class = {
2007-06-16 01:17:32 +04:00
set_kset_name ( " spu " ) ,
. shutdown = spu_shutdown ,
2006-06-19 22:33:19 +04:00
} ;
2006-10-24 20:31:23 +04:00
int spu_add_sysdev_attr ( struct sysdev_attribute * attr )
{
struct spu * spu ;
mutex_lock ( & spu_mutex ) ;
list_for_each_entry ( spu , & spu_full_list , full_list )
sysdev_create_file ( & spu - > sysdev , attr ) ;
mutex_unlock ( & spu_mutex ) ;
return 0 ;
}
EXPORT_SYMBOL_GPL ( spu_add_sysdev_attr ) ;
int spu_add_sysdev_attr_group ( struct attribute_group * attrs )
{
struct spu * spu ;
mutex_lock ( & spu_mutex ) ;
list_for_each_entry ( spu , & spu_full_list , full_list )
sysfs_create_group ( & spu - > sysdev . kobj , attrs ) ;
mutex_unlock ( & spu_mutex ) ;
return 0 ;
}
EXPORT_SYMBOL_GPL ( spu_add_sysdev_attr_group ) ;
void spu_remove_sysdev_attr ( struct sysdev_attribute * attr )
{
struct spu * spu ;
mutex_lock ( & spu_mutex ) ;
list_for_each_entry ( spu , & spu_full_list , full_list )
sysdev_remove_file ( & spu - > sysdev , attr ) ;
mutex_unlock ( & spu_mutex ) ;
}
EXPORT_SYMBOL_GPL ( spu_remove_sysdev_attr ) ;
void spu_remove_sysdev_attr_group ( struct attribute_group * attrs )
{
struct spu * spu ;
mutex_lock ( & spu_mutex ) ;
list_for_each_entry ( spu , & spu_full_list , full_list )
sysfs_remove_group ( & spu - > sysdev . kobj , attrs ) ;
mutex_unlock ( & spu_mutex ) ;
}
EXPORT_SYMBOL_GPL ( spu_remove_sysdev_attr_group ) ;
2006-06-19 22:33:19 +04:00
static int spu_create_sysdev ( struct spu * spu )
{
int ret ;
spu - > sysdev . id = spu - > number ;
spu - > sysdev . cls = & spu_sysdev_class ;
ret = sysdev_register ( & spu - > sysdev ) ;
if ( ret ) {
printk ( KERN_ERR " Can't register SPU %d with sysfs \n " ,
spu - > number ) ;
return ret ;
}
2006-11-20 20:45:02 +03:00
sysfs_add_device_to_node ( & spu - > sysdev , spu - > node ) ;
2006-06-19 22:33:19 +04:00
return 0 ;
}
2006-11-23 02:46:49 +03:00
static int __init create_spu ( void * data )
2005-11-15 23:53:48 +03:00
{
struct spu * spu ;
int ret ;
static int number ;
2007-03-10 02:05:37 +03:00
unsigned long flags ;
2007-07-20 23:39:33 +04:00
struct timespec ts ;
2005-11-15 23:53:48 +03:00
ret = - ENOMEM ;
2006-06-19 22:33:26 +04:00
spu = kzalloc ( sizeof ( * spu ) , GFP_KERNEL ) ;
2005-11-15 23:53:48 +03:00
if ( ! spu )
goto out ;
2006-11-23 02:46:49 +03:00
spin_lock_init ( & spu - > register_lock ) ;
mutex_lock ( & spu_mutex ) ;
spu - > number = number + + ;
mutex_unlock ( & spu_mutex ) ;
ret = spu_create_spu ( spu , data ) ;
2006-10-10 09:14:12 +04:00
2005-11-15 23:53:48 +03:00
if ( ret )
goto out_free ;
2006-10-24 20:31:14 +04:00
spu_mfc_sdr_setup ( spu ) ;
2006-01-04 22:31:30 +03:00
spu_mfc_sr1_set ( spu , 0x33 ) ;
2005-11-15 23:53:48 +03:00
ret = spu_request_irqs ( spu ) ;
if ( ret )
2006-11-23 02:46:49 +03:00
goto out_destroy ;
2005-11-15 23:53:48 +03:00
2006-06-19 22:33:19 +04:00
ret = spu_create_sysdev ( spu ) ;
if ( ret )
goto out_free_irqs ;
2006-11-23 02:46:49 +03:00
mutex_lock ( & spu_mutex ) ;
2007-03-10 02:05:37 +03:00
spin_lock_irqsave ( & spu_list_lock , flags ) ;
2007-07-20 23:39:44 +04:00
list_add ( & spu - > list , & cbe_spu_info [ spu - > node ] . free_spus ) ;
list_add ( & spu - > cbe_list , & cbe_spu_info [ spu - > node ] . spus ) ;
cbe_spu_info [ spu - > node ] . n_spus + + ;
2006-10-24 20:31:23 +04:00
list_add ( & spu - > full_list , & spu_full_list ) ;
2007-03-10 02:05:37 +03:00
spin_unlock_irqrestore ( & spu_list_lock , flags ) ;
2006-03-26 13:37:14 +04:00
mutex_unlock ( & spu_mutex ) ;
2005-11-15 23:53:48 +03:00
2007-07-20 23:39:33 +04:00
spu - > stats . util_state = SPU_UTIL_IDLE_LOADED ;
ktime_get_ts ( & ts ) ;
spu - > stats . tstamp = timespec_to_ns ( & ts ) ;
2007-06-29 04:58:07 +04:00
2007-07-20 23:39:45 +04:00
INIT_LIST_HEAD ( & spu - > aff_list ) ;
2005-11-15 23:53:48 +03:00
goto out ;
2006-06-19 22:33:19 +04:00
out_free_irqs :
spu_free_irqs ( spu ) ;
2006-11-23 02:46:49 +03:00
out_destroy :
spu_destroy_spu ( spu ) ;
2005-11-15 23:53:48 +03:00
out_free :
kfree ( spu ) ;
out :
return ret ;
}
2007-06-29 04:58:07 +04:00
static const char * spu_state_names [ ] = {
" user " , " system " , " iowait " , " idle "
} ;
static unsigned long long spu_acct_time ( struct spu * spu ,
enum spu_utilization_state state )
{
2007-07-20 23:39:33 +04:00
struct timespec ts ;
2007-06-29 04:58:07 +04:00
unsigned long long time = spu - > stats . times [ state ] ;
2007-07-20 23:39:33 +04:00
/*
* If the spu is idle or the context is stopped , utilization
* statistics are not updated . Apply the time delta from the
* last recorded state of the spu .
*/
if ( spu - > stats . util_state = = state ) {
ktime_get_ts ( & ts ) ;
time + = timespec_to_ns ( & ts ) - spu - > stats . tstamp ;
}
2007-06-29 04:58:07 +04:00
2007-07-20 23:39:33 +04:00
return time / NSEC_PER_MSEC ;
2007-06-29 04:58:07 +04:00
}
static ssize_t spu_stat_show ( struct sys_device * sysdev , char * buf )
{
struct spu * spu = container_of ( sysdev , struct spu , sysdev ) ;
return sprintf ( buf , " %s %llu %llu %llu %llu "
" %llu %llu %llu %llu %llu %llu %llu %llu \n " ,
2007-07-20 23:39:33 +04:00
spu_state_names [ spu - > stats . util_state ] ,
2007-06-29 04:58:07 +04:00
spu_acct_time ( spu , SPU_UTIL_USER ) ,
spu_acct_time ( spu , SPU_UTIL_SYSTEM ) ,
spu_acct_time ( spu , SPU_UTIL_IOWAIT ) ,
2007-07-20 23:39:33 +04:00
spu_acct_time ( spu , SPU_UTIL_IDLE_LOADED ) ,
2007-06-29 04:58:07 +04:00
spu - > stats . vol_ctx_switch ,
spu - > stats . invol_ctx_switch ,
spu - > stats . slb_flt ,
spu - > stats . hash_flt ,
spu - > stats . min_flt ,
spu - > stats . maj_flt ,
spu - > stats . class2_intr ,
spu - > stats . libassist ) ;
}
static SYSDEV_ATTR ( stat , 0644 , spu_stat_show , NULL ) ;
2007-07-20 23:39:44 +04:00
struct cbe_spu_info cbe_spu_info [ MAX_NUMNODES ] ;
EXPORT_SYMBOL_GPL ( cbe_spu_info ) ;
2007-07-20 23:39:46 +04:00
/* Hardcoded affinity idxs for QS20 */
# define SPES_PER_BE 8
static int QS20_reg_idxs [ SPES_PER_BE ] = { 0 , 2 , 4 , 6 , 7 , 5 , 3 , 1 } ;
static int QS20_reg_memory [ SPES_PER_BE ] = { 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 } ;
static struct spu * spu_lookup_reg ( int node , u32 reg )
{
struct spu * spu ;
list_for_each_entry ( spu , & cbe_spu_info [ node ] . spus , cbe_list ) {
if ( * ( u32 * ) get_property ( spu_devnode ( spu ) , " reg " , NULL ) = = reg )
return spu ;
}
return NULL ;
}
static void init_aff_QS20_harcoded ( void )
{
int node , i ;
struct spu * last_spu , * spu ;
u32 reg ;
for ( node = 0 ; node < MAX_NUMNODES ; node + + ) {
last_spu = NULL ;
for ( i = 0 ; i < SPES_PER_BE ; i + + ) {
reg = QS20_reg_idxs [ i ] ;
spu = spu_lookup_reg ( node , reg ) ;
if ( ! spu )
continue ;
spu - > has_mem_affinity = QS20_reg_memory [ reg ] ;
if ( last_spu )
list_add_tail ( & spu - > aff_list ,
& last_spu - > aff_list ) ;
last_spu = spu ;
}
}
}
static int of_has_vicinity ( void )
{
struct spu * spu ;
spu = list_entry ( cbe_spu_info [ 0 ] . spus . next , struct spu , cbe_list ) ;
return of_find_property ( spu_devnode ( spu ) , " vicinity " , NULL ) ! = NULL ;
}
2005-11-15 23:53:48 +03:00
static int __init init_spu_base ( void )
{
2007-04-23 23:08:28 +04:00
int i , ret = 0 ;
2005-11-15 23:53:48 +03:00
2007-07-20 23:39:44 +04:00
for ( i = 0 ; i < MAX_NUMNODES ; i + + ) {
INIT_LIST_HEAD ( & cbe_spu_info [ i ] . spus ) ;
INIT_LIST_HEAD ( & cbe_spu_info [ i ] . free_spus ) ;
}
2007-04-23 23:08:29 +04:00
2006-11-27 21:18:54 +03:00
if ( ! spu_management_ops )
2007-04-23 23:08:28 +04:00
goto out ;
2006-11-27 21:18:54 +03:00
2006-06-19 22:33:19 +04:00
/* create sysdev class for spus */
ret = sysdev_class_register ( & spu_sysdev_class ) ;
if ( ret )
2007-04-23 23:08:28 +04:00
goto out ;
2006-06-19 22:33:19 +04:00
2006-11-23 02:46:49 +03:00
ret = spu_enumerate_spus ( create_spu ) ;
2007-07-17 15:05:52 +04:00
if ( ret < 0 ) {
2006-11-23 02:46:49 +03:00
printk ( KERN_WARNING " %s: Error initializing spus \n " ,
__FUNCTION__ ) ;
2007-04-23 23:08:28 +04:00
goto out_unregister_sysdev_class ;
2005-11-15 23:53:48 +03:00
}
2006-10-24 20:31:27 +04:00
2007-07-17 15:05:52 +04:00
if ( ret > 0 ) {
/*
* We cannot put the forward declaration in
* < linux / linux_logo . h > because of conflicting session type
* conflicts for const and __initdata with different compiler
* versions
*/
extern const struct linux_logo logo_spe_clut224 ;
fb_append_extra_logo ( & logo_spe_clut224 , ret ) ;
}
2006-10-24 20:31:27 +04:00
xmon_register_spus ( & spu_full_list ) ;
2007-07-20 23:39:27 +04:00
crash_register_spus ( & spu_full_list ) ;
2007-06-29 04:58:07 +04:00
spu_add_sysdev_attr ( & attr_stat ) ;
2007-07-20 23:39:46 +04:00
if ( ! of_has_vicinity ( ) ) {
long root = of_get_flat_dt_root ( ) ;
if ( of_flat_dt_is_compatible ( root , " IBM,CPBW-1.0 " ) )
init_aff_QS20_harcoded ( ) ;
}
2007-04-23 23:08:28 +04:00
return 0 ;
out_unregister_sysdev_class :
sysdev_class_unregister ( & spu_sysdev_class ) ;
out :
2005-11-15 23:53:48 +03:00
return ret ;
}
module_init ( init_spu_base ) ;
MODULE_LICENSE ( " GPL " ) ;
MODULE_AUTHOR ( " Arnd Bergmann <arndb@de.ibm.com> " ) ;