/*
 * SuperH Timer Support - CMT
 *
 *  Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sh_cmt_device;

/*
 * The CMT comes in 5 different identified flavours, depending not only on the
 * SoC but also on the particular instance. The following table lists the main
 * characteristics of those flavours.
 *
 *			16B	32B	32B-F	48B	48B-2
 * -----------------------------------------------------------------------------
 * Channels		2	1/4	1	6	2/8
 * Control Width	16	16	16	16	32
 * Counter Width	16	32	32	32/48	32/48
 * Shared Start/Stop	Y	Y	Y	Y	N
 *
 * The 48-bit gen2 version has a per-channel start/stop register located in the
 * channel registers block. All other versions have a shared start/stop register
 * located in the global space.
 *
 * Channels are indexed from 0 to N-1 in the documentation. The channel index
 * infers the start/stop bit position in the control register and the channel
 * registers block address. Some CMT instances have a subset of channels
 * available, in which case the index in the documentation doesn't match the
 * "real" index as implemented in hardware. This is for instance the case with
 * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
 * in the documentation but using start/stop bit 5 and having its registers
 * block at 0x60.
 *
 * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
 * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
 */

enum sh_cmt_model {
	SH_CMT_16BIT,
	SH_CMT_32BIT,
	SH_CMT_32BIT_FAST,
	SH_CMT_48BIT,
	SH_CMT_48BIT_GEN2,
};

struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned long width; /* 16 or 32 bit version of hardware block */
	unsigned long overflow_bit;
	unsigned long clear_bits;

	/* callbacks for CMSTR and CMCSR access */
	unsigned long (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      unsigned long value);

	/* callbacks for CMCNT and CMCOR access */
	unsigned long (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs,
			    unsigned long value);
};

struct sh_cmt_channel {
	struct sh_cmt_device *cmt;

	unsigned int index;	/* Index in the documentation */
	unsigned int hwidx;	/* Real hardware index */

	void __iomem *iostart;
	void __iomem *ioctrl;

	unsigned int timer_bit;
	unsigned long flags;
	unsigned long match_value;
	unsigned long next_match_value;
	unsigned long max_match_value;
	unsigned long rate;
	raw_spinlock_t lock;
	struct clock_event_device ced;
	struct clocksource cs;
	unsigned long total_cycles;
	bool cs_enabled;
};

struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;

	void __iomem *mapbase;
	struct clk *clk;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_cmt_channel *channels;
	unsigned int num_channels;
	unsigned int hw_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define SH_CMT16_CMCSR_CMF		(1 << 7)
#define SH_CMT16_CMCSR_CMIE		(1 << 6)
#define SH_CMT16_CMCSR_CKS8		(0 << 0)
#define SH_CMT16_CMCSR_CKS32		(1 << 0)
#define SH_CMT16_CMCSR_CKS128		(2 << 0)
#define SH_CMT16_CMCSR_CKS512		(3 << 0)
#define SH_CMT16_CMCSR_CKS_MASK		(3 << 0)

#define SH_CMT32_CMCSR_CMF		(1 << 15)
#define SH_CMT32_CMCSR_OVF		(1 << 14)
#define SH_CMT32_CMCSR_WRFLG		(1 << 13)
#define SH_CMT32_CMCSR_STTF		(1 << 12)
#define SH_CMT32_CMCSR_STPF		(1 << 11)
#define SH_CMT32_CMCSR_SSIE		(1 << 10)
#define SH_CMT32_CMCSR_CMS		(1 << 9)
#define SH_CMT32_CMCSR_CMM		(1 << 8)
#define SH_CMT32_CMCSR_CMTOUT_IE	(1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE		(0 << 4)
#define SH_CMT32_CMCSR_CMR_DMA		(1 << 4)
#define SH_CMT32_CMCSR_CMR_IRQ		(2 << 4)
#define SH_CMT32_CMCSR_CMR_MASK		(3 << 4)
#define SH_CMT32_CMCSR_DBGIVD		(1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8	(4 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK32	(5 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK128	(6 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK		(7 << 0)

static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
{
	return ioread16(base + (offs << 1));
}

static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
{
	return ioread32(base + (offs << 2));
}

static void sh_cmt_write16(void __iomem *base, unsigned long offs,
			   unsigned long value)
{
	iowrite16(value, base + (offs << 1));
}

static void sh_cmt_write32(void __iomem *base, unsigned long offs,
			   unsigned long value)
{
	iowrite32(value, base + (offs << 2));
}

static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = SH_CMT16_CMCSR_CMF,
		.clear_bits = ~SH_CMT16_CMCSR_CMF,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_32BIT_FAST] = {
		.model = SH_CMT_32BIT_FAST,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT_GEN2] = {
		.model = SH_CMT_48BIT_GEN2,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};

#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

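/*
 * The start/stop register (CMSTR) sits at offset 0: in the per-channel
 * register block on 48-bit gen2 hardware (ch->iostart is set), and in the
 * shared global block otherwise.
 */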
static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
	if (ch->iostart)
		return ch->cmt->info->read_control(ch->iostart, 0);
	else
		return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	if (ch->iostart)
		ch->cmt->info->write_control(ch->iostart, 0, value);
	else
		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}

static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}

static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}

static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
					int *has_wrapped)
{
	unsigned long v1, v2, v3;
	int o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}

static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->cmt->lock, flags);
	value = sh_cmt_read_cmstr(ch);

	if (start)
		value |= 1 << ch->timer_bit;
	else
		value &= ~(1 << ch->timer_bit);

	sh_cmt_write_cmstr(ch, value);
	raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
}

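/*
 * Power up a channel: take the runtime PM and clock references, program
 * compare match operation with the divided input clock (/512 for the 16-bit
 * variant, RCLK/8 otherwise), clear CMCNT and start counting. *rate receives
 * the resulting counting frequency.
 */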
static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
{
	int k, ret;

	pm_runtime_get_sync(&ch->cmt->pdev->dev);
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->info->width == 16) {
		*rate = clk_get_rate(ch->cmt->clk) / 512;
		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
				   SH_CMT16_CMCSR_CKS512);
	} else {
		*rate = clk_get_rate(ch->cmt->clk) / 8;
		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
				   SH_CMT32_CMCSR_CMTOUT_IE |
				   SH_CMT32_CMCSR_CMR_IRQ |
				   SH_CMT32_CMCSR_CKS_RCLK8);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	sh_cmt_write_cmcnt(ch, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
	 * modifying CMCNT register; two RCLK cycles are necessary before
	 * this register is either read or any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here.  This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(ch))
			break;
		udelay(1);
	}

	if (sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;

 err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

 err0:
	return ret;
}

static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
	pm_runtime_put(&ch->cmt->pdev->dev);
}

/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)

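/*
 * Program the next compare match. The new match value is written to CMCOR and
 * then checked against the current counter to cope with races against the
 * running counter and the interrupt handler; see the inline comments for the
 * individual cases.
 */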
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	unsigned long new_match;
	unsigned long value = ch->next_match_value;
	unsigned long delay = 0;
	unsigned long now = 0;
	int has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}

static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);
	__sh_cmt_set_next(ch, delta);
	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}

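/*
 * A channel can serve as a clock event device, a clock source, or both.
 * sh_cmt_start() and sh_cmt_stop() track the FLAG_CLOCKEVENT/FLAG_CLOCKSOURCE
 * users so the hardware is enabled for the first user and disabled with the
 * last one.
 */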
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(ch, &ch->rate);

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}

static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(ch);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

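/*
 * Clock source: ch->total_cycles accumulates the elapsed match periods from
 * the interrupt handler; the read callback adds the current raw counter value
 * (plus one extra period if a wrap is pending) on top.
 */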
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}

static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	unsigned long flags, raw;
	unsigned long value;
	int has_wrapped;

	raw_spin_lock_irqsave(&ch->lock, flags);
	value = ch->total_cycles;
	raw = sh_cmt_get_counter(ch, &has_wrapped);

	if (unlikely(has_wrapped))
		raw += ch->match_value + 1;
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int ret;
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(ch->cs_enabled);

	ch->total_cycles = 0;

	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}
	return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 125;
	cs->read = sh_cmt_clocksource_read;
	cs->enable = sh_cmt_clocksource_enable;
	cs->disable = sh_cmt_clocksource_disable;
	cs->suspend = sh_cmt_clocksource_suspend;
	cs->resume = sh_cmt_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

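/*
 * Clock event device: the counter counts up to the compare match value, so an
 * event delta of N ticks is programmed as a match value of N - 1.
 */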
static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}

static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_cmt_start(ch, FLAG_CLOCKEVENT);

	/* TODO: calculate good shift from rate and counter bit width */

	ced->shift = 32;
	ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

	if (periodic)
		sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(ch, ch->max_match_value);
}

static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->cmt->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_cmt_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->cmt->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_cmt_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}
}

static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		sh_cmt_set_next(ch, delta - 1);

	return 0;
}

static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
	clk_unprepare(ch->cmt->clk);
}

static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	clk_prepare(ch->cmt->clk);
	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
}

static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				      const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int irq;
	int ret;

	irq = platform_get_irq(ch->cmt->pdev, ch->index);
	if (irq < 0) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return irq;
	}

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 125;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_mode = sh_cmt_clock_event_mode;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);

	return 0;
}

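/*
 * Register the channel as a clock event device and/or a clock source as
 * requested, and record which facilities the CMT instance provides.
 */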
static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	int ret;

	if (clockevent) {
		ch->cmt->has_clockevent = true;
		ret = sh_cmt_register_clockevent(ch, name);
		if (ret < 0)
			return ret;
	}

	if (clocksource) {
		ch->cmt->has_clocksource = true;
		sh_cmt_register_clocksource(ch, name);
	}

	return 0;
}

static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
				unsigned int hwidx, bool clockevent,
				bool clocksource, struct sh_cmt_device *cmt)
{
	int ret;

	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->cmt = cmt;
	ch->index = index;
	ch->hwidx = hwidx;

	/*
	 * Compute the address of the channel control register block. For the
	 * timers with a per-channel start/stop register, compute its address
	 * as well.
	 */
	switch (cmt->info->model) {
	case SH_CMT_16BIT:
		ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
		break;
	case SH_CMT_32BIT:
	case SH_CMT_48BIT:
		ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
		break;
	case SH_CMT_32BIT_FAST:
		/*
		 * The 32-bit "fast" timer has a single channel at hwidx 5 but
		 * is located at offset 0x40 instead of 0x60 for some reason.
		 */
		ch->ioctrl = cmt->mapbase + 0x40;
		break;
	case SH_CMT_48BIT_GEN2:
		ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
		ch->ioctrl = ch->iostart + 0x10;
		break;
	}

	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
		ch->max_match_value = ~0;
	else
		ch->max_match_value = (1 << cmt->info->width) - 1;

	ch->match_value = ch->max_match_value;
	raw_spin_lock_init(&ch->lock);

	ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx;

	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
			      clockevent, clocksource);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
			ch->index);
		return ret;
	}
	ch->cs_enabled = false;

	return 0;
}

static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
{
	struct resource *mem;

	mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
	if (cmt->mapbase == NULL) {
		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
		return -ENXIO;
	}

	return 0;
}

static const struct platform_device_id sh_cmt_id_table[] = {
	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
	{ "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
	{ "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
	{ "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);

static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
	{ .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] },
	{ .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] },
	{ .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
	{ .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);

static int sh_cmt_parse_dt(struct sh_cmt_device *cmt)
{
	struct device_node *np = cmt->pdev->dev.of_node;

	return of_property_read_u32(np, "renesas,channels-mask",
				    &cmt->hw_channels);
}

static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
	unsigned int mask;
	unsigned int i;
	int ret;

	memset(cmt, 0, sizeof(*cmt));
	cmt->pdev = pdev;
	raw_spin_lock_init(&cmt->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		const struct of_device_id *id;

		id = of_match_node(sh_cmt_of_table, pdev->dev.of_node);
		cmt->info = id->data;

		ret = sh_cmt_parse_dt(cmt);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		struct sh_timer_config *cfg = pdev->dev.platform_data;
		const struct platform_device_id *id = pdev->id_entry;

		cmt->info = (const struct sh_cmt_info *)id->driver_data;
		cmt->hw_channels = cfg->channels_mask;
	} else {
		dev_err(&cmt->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	cmt->clk = clk_get(&cmt->pdev->dev, "fck");
	if (IS_ERR(cmt->clk)) {
		dev_err(&cmt->pdev->dev, "cannot get clock\n");
		return PTR_ERR(cmt->clk);
	}

	ret = clk_prepare(cmt->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource(s). */
	ret = sh_cmt_map_memory(cmt);
	if (ret < 0)
		goto err_clk_unprepare;

	/* Allocate and setup the channels. */
	cmt->num_channels = hweight8(cmt->hw_channels);
	cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
				GFP_KERNEL);
	if (cmt->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source. If only one channel is available use it for both.
	 */
	for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
		unsigned int hwidx = ffs(mask) - 1;
		bool clocksource = i == 1 || cmt->num_channels == 1;
		bool clockevent = i == 0;

		ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
					   clockevent, clocksource, cmt);
		if (ret < 0)
			goto err_unmap;

		mask &= ~(1 << hwidx);
	}

	platform_set_drvdata(pdev, cmt);

	return 0;

err_unmap:
	kfree(cmt->channels);
	iounmap(cmt->mapbase);
err_clk_unprepare:
	clk_unprepare(cmt->clk);
err_clk_put:
	clk_put(cmt->clk);
	return ret;
}

static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (cmt) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
	if (cmt == NULL)
		return -ENOMEM;

	ret = sh_cmt_setup(cmt, pdev);
	if (ret) {
		kfree(cmt);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cmt->has_clockevent || cmt->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
		.of_match_table = of_match_ptr(sh_cmt_of_table),
	},
	.id_table	= sh_cmt_id_table,
};

static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}

early_platform_init("earlytimer", &sh_cmt_device_driver);
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");