/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
2014-02-12 19:56:44 +04:00
# include <linux/clk.h>
# include <linux/clockchips.h>
# include <linux/clocksource.h>
# include <linux/delay.h>
# include <linux/err.h>
2009-05-01 10:51:00 +04:00
# include <linux/init.h>
# include <linux/interrupt.h>
# include <linux/io.h>
2014-02-12 19:56:44 +04:00
# include <linux/ioport.h>
2009-05-01 10:51:00 +04:00
# include <linux/irq.h>
2011-07-03 21:36:22 +04:00
# include <linux/module.h>
2014-02-12 19:56:44 +04:00
# include <linux/platform_device.h>
2012-03-14 01:40:00 +04:00
# include <linux/pm_domain.h>
2012-08-06 03:41:20 +04:00
# include <linux/pm_runtime.h>
2014-02-12 19:56:44 +04:00
# include <linux/sh_timer.h>
# include <linux/slab.h>
# include <linux/spinlock.h>
2009-05-01 10:51:00 +04:00
2014-01-28 15:36:48 +04:00
enum sh_tmu_model {
SH_TMU_LEGACY ,
SH_TMU ,
SH_TMU_SH3 ,
} ;
2014-01-28 01:04:17 +04:00
struct sh_tmu_device;

/*
 * Per-channel driver state. A channel can serve as a clock event device
 * and/or a clocksource; both objects are embedded so the callbacks can
 * recover the channel with container_of().
 */
struct sh_tmu_channel {
	struct sh_tmu_device *tmu;	/* owning TMU instance */
	unsigned int index;		/* channel number within the block */

	void __iomem *base;		/* channel register window */
	int irq;			/* underflow interrupt */

	unsigned long rate;		/* counter input rate (clk / 4), Hz */
	unsigned long periodic;		/* reload value for periodic mode */
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;		/* clocksource currently enabled? */
	unsigned int enable_count;	/* nested sh_tmu_enable() references */
};
2014-01-28 01:04:17 +04:00
/*
 * Per-platform-device driver state: the mapped register block, the
 * functional clock and the array of channels carved out of it.
 */
struct sh_tmu_device {
	struct platform_device *pdev;
	void __iomem *mapbase;		/* mapped TMU register block */
	struct clk *clk;		/* functional clock feeding the counters */

	enum sh_tmu_model model;

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;		/* a channel registered a clockevent */
	bool has_clocksource;		/* a channel registered a clocksource */
};
2012-05-25 08:39:09 +04:00
/* Serializes read-modify-write of the TSTR register shared by channels. */
static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

/* TCR bits: underflow flag, underflow interrupt enable, prescaler select. */
#define TCR_UNF			(1 << 8)
#define TCR_UNIE		(1 << 5)
#define TCR_TPSC_CLK4		(0 << 0)
#define TCR_TPSC_CLK16		(1 << 0)
#define TCR_TPSC_CLK64		(2 << 0)
#define TCR_TPSC_CLK256		(3 << 0)
#define TCR_TPSC_CLK1024	(4 << 0)
#define TCR_TPSC_MASK		(7 << 0)
2014-01-27 18:29:19 +04:00
static inline unsigned long sh_tmu_read ( struct sh_tmu_channel * ch , int reg_nr )
2009-05-01 10:51:00 +04:00
{
unsigned long offs ;
2014-01-28 15:36:48 +04:00
if ( reg_nr = = TSTR ) {
switch ( ch - > tmu - > model ) {
case SH_TMU_LEGACY :
return ioread8 ( ch - > tmu - > mapbase ) ;
case SH_TMU_SH3 :
return ioread8 ( ch - > tmu - > mapbase + 2 ) ;
case SH_TMU :
return ioread8 ( ch - > tmu - > mapbase + 4 ) ;
}
}
2009-05-01 10:51:00 +04:00
offs = reg_nr < < 2 ;
if ( reg_nr = = TCR )
2014-01-28 01:04:17 +04:00
return ioread16 ( ch - > base + offs ) ;
2009-05-01 10:51:00 +04:00
else
2014-01-28 01:04:17 +04:00
return ioread32 ( ch - > base + offs ) ;
2009-05-01 10:51:00 +04:00
}
2014-01-27 18:29:19 +04:00
static inline void sh_tmu_write ( struct sh_tmu_channel * ch , int reg_nr ,
2009-05-01 10:51:00 +04:00
unsigned long value )
{
unsigned long offs ;
if ( reg_nr = = TSTR ) {
2014-01-28 15:36:48 +04:00
switch ( ch - > tmu - > model ) {
case SH_TMU_LEGACY :
return iowrite8 ( value , ch - > tmu - > mapbase ) ;
case SH_TMU_SH3 :
return iowrite8 ( value , ch - > tmu - > mapbase + 2 ) ;
case SH_TMU :
return iowrite8 ( value , ch - > tmu - > mapbase + 4 ) ;
}
2009-05-01 10:51:00 +04:00
}
offs = reg_nr < < 2 ;
if ( reg_nr = = TCR )
2014-01-28 01:04:17 +04:00
iowrite16 ( value , ch - > base + offs ) ;
2009-05-01 10:51:00 +04:00
else
2014-01-28 01:04:17 +04:00
iowrite32 ( value , ch - > base + offs ) ;
2009-05-01 10:51:00 +04:00
}
2014-01-27 18:29:19 +04:00
static void sh_tmu_start_stop_ch ( struct sh_tmu_channel * ch , int start )
2009-05-01 10:51:00 +04:00
{
unsigned long flags , value ;
/* start stop register shared by multiple timer channels */
2012-05-25 08:39:09 +04:00
raw_spin_lock_irqsave ( & sh_tmu_lock , flags ) ;
2014-01-27 18:29:19 +04:00
value = sh_tmu_read ( ch , TSTR ) ;
2009-05-01 10:51:00 +04:00
if ( start )
2014-01-28 01:04:17 +04:00
value | = 1 < < ch - > index ;
2009-05-01 10:51:00 +04:00
else
2014-01-28 01:04:17 +04:00
value & = ~ ( 1 < < ch - > index ) ;
2009-05-01 10:51:00 +04:00
2014-01-27 18:29:19 +04:00
sh_tmu_write ( ch , TSTR , value ) ;
2012-05-25 08:39:09 +04:00
raw_spin_unlock_irqrestore ( & sh_tmu_lock , flags ) ;
2009-05-01 10:51:00 +04:00
}
2014-01-27 18:29:19 +04:00
/*
 * Power up channel @ch and start it free-running: maximum timeout,
 * parent clock divided by 4, interrupts masked. Caches the resulting
 * count rate in ch->rate. Returns 0 on success or a negative error
 * code if the functional clock cannot be enabled. The register write
 * order below matters: the channel must be stopped before TCOR/TCNT/TCR
 * are reprogrammed.
 */
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}
2014-01-27 18:29:19 +04:00
static int sh_tmu_enable ( struct sh_tmu_channel * ch )
2012-08-06 03:48:17 +04:00
{
2014-01-27 18:29:19 +04:00
if ( ch - > enable_count + + > 0 )
2012-08-06 03:48:17 +04:00
return 0 ;
2014-01-27 18:29:19 +04:00
pm_runtime_get_sync ( & ch - > tmu - > pdev - > dev ) ;
dev_pm_syscore_device ( & ch - > tmu - > pdev - > dev , true ) ;
2012-08-06 03:48:17 +04:00
2014-01-27 18:29:19 +04:00
return __sh_tmu_enable ( ch ) ;
2012-08-06 03:48:17 +04:00
}
2014-01-27 18:29:19 +04:00
/*
 * Counterpart of __sh_tmu_enable(): stop the channel, mask its
 * interrupt in TCR and gate the functional clock.
 */
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}
2014-01-27 18:29:19 +04:00
static void sh_tmu_disable ( struct sh_tmu_channel * ch )
2012-08-06 03:48:17 +04:00
{
2014-01-27 18:29:19 +04:00
if ( WARN_ON ( ch - > enable_count = = 0 ) )
2012-08-06 03:48:17 +04:00
return ;
2014-01-27 18:29:19 +04:00
if ( - - ch - > enable_count > 0 )
2012-08-06 03:48:17 +04:00
return ;
2014-01-27 18:29:19 +04:00
__sh_tmu_disable ( ch ) ;
2012-08-06 03:48:17 +04:00
2014-01-27 18:29:19 +04:00
dev_pm_syscore_device ( & ch - > tmu - > pdev - > dev , false ) ;
pm_runtime_put ( & ch - > tmu - > pdev - > dev ) ;
2012-08-06 03:48:17 +04:00
}
2014-01-27 18:29:19 +04:00
static void sh_tmu_set_next ( struct sh_tmu_channel * ch , unsigned long delta ,
2009-05-01 10:51:00 +04:00
int periodic )
{
/* stop timer */
2014-01-27 18:29:19 +04:00
sh_tmu_start_stop_ch ( ch , 0 ) ;
2009-05-01 10:51:00 +04:00
/* acknowledge interrupt */
2014-01-27 18:29:19 +04:00
sh_tmu_read ( ch , TCR ) ;
2009-05-01 10:51:00 +04:00
/* enable interrupt */
2014-01-29 03:33:08 +04:00
sh_tmu_write ( ch , TCR , TCR_UNIE | TCR_TPSC_CLK4 ) ;
2009-05-01 10:51:00 +04:00
/* reload delta value in case of periodic timer */
if ( periodic )
2014-01-27 18:29:19 +04:00
sh_tmu_write ( ch , TCOR , delta ) ;
2009-05-01 10:51:00 +04:00
else
2014-01-27 18:29:19 +04:00
sh_tmu_write ( ch , TCOR , 0xffffffff ) ;
2009-05-01 10:51:00 +04:00
2014-01-27 18:29:19 +04:00
sh_tmu_write ( ch , TCNT , delta ) ;
2009-05-01 10:51:00 +04:00
/* start timer */
2014-01-27 18:29:19 +04:00
sh_tmu_start_stop_ch ( ch , 1 ) ;
2009-05-01 10:51:00 +04:00
}
static irqreturn_t sh_tmu_interrupt ( int irq , void * dev_id )
{
2014-01-27 18:29:19 +04:00
struct sh_tmu_channel * ch = dev_id ;
2009-05-01 10:51:00 +04:00
/* disable or acknowledge interrupt */
2014-01-27 18:29:19 +04:00
if ( ch - > ced . mode = = CLOCK_EVT_MODE_ONESHOT )
2014-01-29 03:33:08 +04:00
sh_tmu_write ( ch , TCR , TCR_TPSC_CLK4 ) ;
2009-05-01 10:51:00 +04:00
else
2014-01-29 03:33:08 +04:00
sh_tmu_write ( ch , TCR , TCR_UNIE | TCR_TPSC_CLK4 ) ;
2009-05-01 10:51:00 +04:00
/* notify clockevent layer */
2014-01-27 18:29:19 +04:00
ch - > ced . event_handler ( & ch - > ced ) ;
2009-05-01 10:51:00 +04:00
return IRQ_HANDLED ;
}
2014-01-27 18:29:19 +04:00
/* Map an embedded clocksource back to its owning channel. */
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
static cycle_t sh_tmu_clocksource_read ( struct clocksource * cs )
{
2014-01-27 18:29:19 +04:00
struct sh_tmu_channel * ch = cs_to_sh_tmu ( cs ) ;
2009-05-01 10:51:00 +04:00
2014-01-27 18:29:19 +04:00
return sh_tmu_read ( ch , TCNT ) ^ 0xffffffff ;
2009-05-01 10:51:00 +04:00
}
static int sh_tmu_clocksource_enable ( struct clocksource * cs )
{
2014-01-27 18:29:19 +04:00
struct sh_tmu_channel * ch = cs_to_sh_tmu ( cs ) ;
2011-04-25 17:38:37 +04:00
int ret ;
2009-05-01 10:51:00 +04:00
2014-01-27 18:29:19 +04:00
if ( WARN_ON ( ch - > cs_enabled ) )
2012-08-06 03:48:17 +04:00
return 0 ;
2014-01-27 18:29:19 +04:00
ret = sh_tmu_enable ( ch ) ;
2012-08-06 03:41:20 +04:00
if ( ! ret ) {
2014-01-27 18:29:19 +04:00
__clocksource_updatefreq_hz ( cs , ch - > rate ) ;
ch - > cs_enabled = true ;
2012-08-06 03:41:20 +04:00
}
2012-08-06 03:48:17 +04:00
2011-04-25 17:38:37 +04:00
return ret ;
2009-05-01 10:51:00 +04:00
}
static void sh_tmu_clocksource_disable ( struct clocksource * cs )
{
2014-01-27 18:29:19 +04:00
struct sh_tmu_channel * ch = cs_to_sh_tmu ( cs ) ;
2012-08-06 03:41:20 +04:00
2014-01-27 18:29:19 +04:00
if ( WARN_ON ( ! ch - > cs_enabled ) )
2012-08-06 03:48:17 +04:00
return ;
2012-08-06 03:41:20 +04:00
2014-01-27 18:29:19 +04:00
sh_tmu_disable ( ch ) ;
ch - > cs_enabled = false ;
2012-08-06 03:41:20 +04:00
}
static void sh_tmu_clocksource_suspend ( struct clocksource * cs )
{
2014-01-27 18:29:19 +04:00
struct sh_tmu_channel * ch = cs_to_sh_tmu ( cs ) ;
2012-08-06 03:41:20 +04:00
2014-01-27 18:29:19 +04:00
if ( ! ch - > cs_enabled )
2012-08-06 03:48:17 +04:00
return ;
2012-08-06 03:41:20 +04:00
2014-01-27 18:29:19 +04:00
if ( - - ch - > enable_count = = 0 ) {
__sh_tmu_disable ( ch ) ;
pm_genpd_syscore_poweroff ( & ch - > tmu - > pdev - > dev ) ;
2012-08-06 03:48:17 +04:00
}
2012-08-06 03:41:20 +04:00
}
static void sh_tmu_clocksource_resume ( struct clocksource * cs )
{
2014-01-27 18:29:19 +04:00
struct sh_tmu_channel * ch = cs_to_sh_tmu ( cs ) ;
2012-08-06 03:41:20 +04:00
2014-01-27 18:29:19 +04:00
if ( ! ch - > cs_enabled )
2012-08-06 03:48:17 +04:00
return ;
2014-01-27 18:29:19 +04:00
if ( ch - > enable_count + + = = 0 ) {
pm_genpd_syscore_poweron ( & ch - > tmu - > pdev - > dev ) ;
__sh_tmu_enable ( ch ) ;
2012-08-06 03:48:17 +04:00
}
2009-05-01 10:51:00 +04:00
}
2014-01-27 18:29:19 +04:00
/*
 * Register channel @ch as a 32-bit continuous clocksource named @name.
 * The rate is only known once the clock is running, so the device is
 * registered with a dummy 1 Hz frequency that ->enable() corrects via
 * __clocksource_updatefreq_hz(). Always returns 0.
 */
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}
2014-01-27 18:29:19 +04:00
/* Map an embedded clock_event_device back to its owning channel. */
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
2014-01-27 18:29:19 +04:00
/*
 * Power the channel up and configure the clock event device for
 * periodic or oneshot operation. In periodic mode the reload value is
 * one HZ tick's worth of counter cycles, rounded to nearest. Oneshot
 * programming happens later through ->set_next_event().
 */
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	/* ch->rate is only valid after sh_tmu_enable(). */
	clockevents_config(ced, ch->rate);

	if (periodic) {
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}
/*
 * clock_event_device ->set_mode callback: tear down the previous mode
 * first (the channel must be stopped before it can be reprogrammed),
 * then start the requested one.
 */
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;	/* set when the old mode already disabled us */

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		/* Avoid a double disable when leaving periodic/oneshot. */
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}
/*
 * clock_event_device ->set_next_event callback: program an expiry
 * @delta counter ticks from now. Only legal in oneshot mode.
 * Always returns 0 (the hardware accepts any 32-bit delta).
 */
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}
2012-08-06 03:41:20 +04:00
static void sh_tmu_clock_event_suspend ( struct clock_event_device * ced )
{
2014-01-27 18:29:19 +04:00
pm_genpd_syscore_poweroff ( & ced_to_sh_tmu ( ced ) - > tmu - > pdev - > dev ) ;
2012-08-06 03:41:20 +04:00
}
static void sh_tmu_clock_event_resume ( struct clock_event_device * ced )
{
2014-01-27 18:29:19 +04:00
pm_genpd_syscore_poweron ( & ced_to_sh_tmu ( ced ) - > tmu - > pdev - > dev ) ;
2012-08-06 03:41:20 +04:00
}
2014-01-27 18:29:19 +04:00
/*
 * Register channel @ch as a clock event device named @name and request
 * its underflow interrupt. The device is registered with a dummy 1 Hz
 * frequency; the real rate is applied via clockevents_config() when the
 * channel is started. NOTE(review): if request_irq() fails the already
 * registered clock event device is left in place and will never fire —
 * the function returns void so the error can only be logged.
 */
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	/* min_delta 0x300, max_delta 0xffffffff (full 32-bit counter). */
	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
2014-02-17 19:04:16 +04:00
/*
 * Register channel @ch in the requested role(s). A clockevent role
 * takes precedence over clocksource — a single channel never serves
 * both. Always returns 0.
 */
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
		return 0;
	}

	if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}
2014-01-28 15:36:48 +04:00
/*
 * Initialize channel @ch of @tmu: work out its index and register base
 * (model-dependent), look up its interrupt and register it in the
 * requested role(s). Channels with neither role are skipped.
 * Returns 0 on success or a negative error code.
 */
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;

	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

		/*
		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
		 * channel registers blocks at base + 2 + 12 * index, while all
		 * other variants map them at base + 4 + 12 * index. We can
		 * compute the index by just dividing by 12, the 2 bytes or 4
		 * bytes offset being hidden by the integer division.
		 */
		ch->index = cfg->channel_offset / 12;
		ch->base = tmu->mapbase + cfg->channel_offset;
	} else {
		ch->index = index;

		/* 12 bytes of channel registers past the shared TSTR area. */
		if (tmu->model == SH_TMU_SH3)
			ch->base = tmu->mapbase + 4 + ch->index * 12;
		else
			ch->base = tmu->mapbase + 8 + ch->index * 12;
	}

	ch->irq = platform_get_irq(tmu->pdev, ch->index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}
2014-01-28 15:36:48 +04:00
/*
 * Map the TMU register block. In the legacy configuration the memory
 * resource points at one channel's registers rather than the block
 * base, so the mapping is rebased by subtracting the channel offset
 * (undone in sh_tmu_unmap_memory()).
 * Returns 0 on success, -ENXIO on missing resource or mapping failure.
 */
static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	/*
	 * In legacy platform device configuration (with one device per channel)
	 * the resource points to the channel base address.
	 */
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

		tmu->mapbase -= cfg->channel_offset;
	}

	return 0;
}
2014-01-28 01:04:17 +04:00
2014-01-28 15:36:48 +04:00
/*
 * Undo sh_tmu_map_memory(): restore the legacy rebase so iounmap() gets
 * the pointer ioremap_nocache() originally returned.
 */
static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
{
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

		tmu->mapbase += cfg->channel_offset;
	}

	iounmap(tmu->mapbase);
}
/*
 * One-time setup of a TMU instance: clock lookup, register mapping and
 * per-channel initialization. On the legacy model each platform device
 * describes a single channel; otherwise cfg->channels_mask selects the
 * channels, the first being used as clock event device and the second
 * as clocksource. Returns 0 on success or a negative error code,
 * unwinding via the goto chain in reverse order of acquisition.
 */
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	const struct platform_device_id *id = pdev->id_entry;
	unsigned int i;
	int ret;

	if (!cfg) {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	tmu->pdev = pdev;
	tmu->model = id->driver_data;

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev,
			   tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	if (tmu->model == SH_TMU_LEGACY)
		tmu->num_channels = 1;
	else
		tmu->num_channels = hweight8(cfg->channels_mask);

	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	if (tmu->model == SH_TMU_LEGACY) {
		ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
					   cfg->clockevent_rating != 0,
					   cfg->clocksource_rating != 0, tmu);
		if (ret < 0)
			goto err_unmap;
	} else {
		/*
		 * Use the first channel as a clock event device and the second
		 * channel as a clock source.
		 */
		for (i = 0; i < tmu->num_channels; ++i) {
			ret = sh_tmu_channel_setup(&tmu->channels[i], i,
						   i == 0, i == 1, tmu);
			if (ret < 0)
				goto err_unmap;
		}
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	sh_tmu_unmap_memory(tmu);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
2012-12-22 03:11:38 +04:00
/*
 * Platform driver probe. The driver also runs as an early platform
 * device ("earlytimer"): in that pass runtime PM is skipped, and when
 * the regular probe runs later it finds the drvdata already set and
 * keeps the early instance instead of setting up a new one.
 * Returns 0 on success or a negative error code.
 */
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

out:
	/* Timers in active use must allow runtime PM calls from IRQ context. */
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
2012-12-22 03:11:38 +04:00
/* Removal is refused: registered timers cannot be torn down. */
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
2014-01-28 15:36:48 +04:00
/* Device-name match table; driver_data selects the hardware model. */
static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh_tmu", SH_TMU_LEGACY },
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
2009-05-01 10:51:00 +04:00
static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	},
	.id_table	= sh_tmu_id_table,
};
static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

/* Also usable as an early platform device to drive the system tick. */
early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");