/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>
#include <mach/clock.h>
#include <mach/psc.h>
#include <mach/cputype.h>

#include "clock.h"

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

static unsigned psc_domain(struct clk *clk)
{
        return (clk->flags & PSC_DSP)
                ? DAVINCI_GPSC_DSPDOMAIN
                : DAVINCI_GPSC_ARMDOMAIN;
}

static void __clk_enable(struct clk *clk)
{
        if (clk->parent)
                __clk_enable(clk->parent);
        if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
                davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
                                PSC_STATE_ENABLE);
}

static void __clk_disable(struct clk *clk)
{
        if (WARN_ON(clk->usecount == 0))
                return;

        if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
            (clk->flags & CLK_PSC))
                davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
                                (clk->flags & PSC_SWRSTDISABLE) ?
                                PSC_STATE_SWRSTDISABLE : PSC_STATE_DISABLE);

        if (clk->parent)
                __clk_disable(clk->parent);
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        __clk_enable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
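
/*
 * Illustrative sketch only: how client code typically pairs the
 * enable/disable API above.  The con_id "uart0" is an assumption; real
 * lookups go through the clkdev table passed to davinci_clk_init().
 */
static void __maybe_unused davinci_clk_usage_example(void)
{
        struct clk *clk = clk_get(NULL, "uart0");       /* assumed con_id */

        if (IS_ERR(clk))
                return;

        if (clk_enable(clk) == 0) {                     /* ungates the LPSC */
                pr_info("uart0 clock: %lu Hz\n", clk_get_rate(clk));
                clk_disable(clk);                       /* re-gates when idle */
        }

        clk_put(clk);
}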

unsigned long clk_get_rate(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        if (clk->round_rate)
                return clk->round_rate(clk, rate);

        return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);

/* Propagate rate to children */
static void propagate_rate(struct clk *root)
{
        struct clk *clk;

        list_for_each_entry(clk, &root->children, childnode) {
                if (clk->recalc)
                        clk->rate = clk->recalc(clk);
                propagate_rate(clk);
        }
}

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        if (clk->set_rate)
                ret = clk->set_rate(clk, rate);

        spin_lock_irqsave(&clockfw_lock, flags);
        if (ret == 0) {
                if (clk->recalc)
                        clk->rate = clk->recalc(clk);
                propagate_rate(clk);
        }
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /* Cannot change parent on enabled clock */
        if (WARN_ON(clk->usecount))
                return -EINVAL;

        mutex_lock(&clocks_mutex);
        clk->parent = parent;
        list_del_init(&clk->childnode);
        list_add(&clk->childnode, &clk->parent->children);
        mutex_unlock(&clocks_mutex);

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->recalc)
                clk->rate = clk->recalc(clk);
        propagate_rate(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(clk_set_parent);

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        if (WARN(clk->parent && !clk->parent->rate,
                        "CLK: %s parent %s has no rate!\n",
                        clk->name, clk->parent->name))
                return -EINVAL;

        INIT_LIST_HEAD(&clk->children);

        mutex_lock(&clocks_mutex);
        list_add_tail(&clk->node, &clocks);
        if (clk->parent)
                list_add_tail(&clk->childnode, &clk->parent->children);
        mutex_unlock(&clocks_mutex);

        /* If rate is already set, use it */
        if (clk->rate)
                return 0;

        /* Else, see if there is a way to calculate it */
        if (clk->recalc)
                clk->rate = clk->recalc(clk);

        /* Otherwise, default to parent rate */
        else if (clk->parent)
                clk->rate = clk->parent->rate;

        return 0;
}
EXPORT_SYMBOL(clk_register);
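
/*
 * Illustrative sketch only: a PSC leaf clock as an SoC file might define
 * it before registration.  Because neither .rate nor .recalc is set on
 * the leaf, clk_register() falls back to the parent's rate.  The names,
 * the 99 MHz parent rate and the LPSC number are assumptions.
 */
static struct clk example_parent_clk __maybe_unused = {
        .name   = "pll1_sysclk2",       /* assumed name */
        .rate   = 99000000,             /* assumed fixed rate */
};

static struct clk example_uart_clk __maybe_unused = {
        .name   = "uart0",              /* assumed name */
        .parent = &example_parent_clk,
        .lpsc   = 19,                   /* assumed LPSC number */
};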

void clk_unregister(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return;

        mutex_lock(&clocks_mutex);
        list_del(&clk->node);
        list_del(&clk->childnode);
        mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
        struct clk *ck;

        spin_lock_irq(&clockfw_lock);
        list_for_each_entry(ck, &clocks, node) {
                if (ck->usecount > 0)
                        continue;
                if (!(ck->flags & CLK_PSC))
                        continue;

                /* ignore if in Disabled or SwRstDisable states */
                if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
                        continue;

                pr_debug("Clocks: disable unused %s\n", ck->name);

                davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc,
                                (ck->flags & PSC_SWRSTDISABLE) ?
                                PSC_STATE_SWRSTDISABLE : PSC_STATE_DISABLE);
        }
        spin_unlock_irq(&clockfw_lock);

        return 0;
}
late_initcall(clk_disable_unused);
#endif

static unsigned long clk_sysclk_recalc(struct clk *clk)
{
        u32 v, plldiv;
        struct pll_data *pll;
        unsigned long rate = clk->rate;

        /* If this is the PLL base clock, no more calculations needed */
        if (clk->pll_data)
                return rate;

        if (WARN_ON(!clk->parent))
                return rate;

        rate = clk->parent->rate;

        /* Otherwise, the parent must be a PLL */
        if (WARN_ON(!clk->parent->pll_data))
                return rate;

        pll = clk->parent->pll_data;

        /* If pre-PLL, source clock is before the multiplier and divider(s) */
        if (clk->flags & PRE_PLL)
                rate = pll->input_rate;

        if (!clk->div_reg)
                return rate;

        v = __raw_readl(pll->base + clk->div_reg);
        if (v & PLLDIV_EN) {
                plldiv = (v & pll->div_ratio_mask) + 1;
                if (plldiv)
                        rate /= plldiv;
        }

        return rate;
}

int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
        unsigned v;
        struct pll_data *pll;
        unsigned long input;
        unsigned ratio = 0;

        /* If this is the PLL base clock, wrong function to call */
        if (clk->pll_data)
                return -EINVAL;

        /* There must be a parent... */
        if (WARN_ON(!clk->parent))
                return -EINVAL;

        /* ... the parent must be a PLL... */
        if (WARN_ON(!clk->parent->pll_data))
                return -EINVAL;

        /* ... and this clock must have a divider. */
        if (WARN_ON(!clk->div_reg))
                return -EINVAL;

        pll = clk->parent->pll_data;
        input = clk->parent->rate;

        /* If pre-PLL, source clock is before the multiplier and divider(s) */
        if (clk->flags & PRE_PLL)
                input = pll->input_rate;

        if (input > rate) {
                /*
                 * Can afford to provide an output little higher than requested
                 * only if maximum rate supported by hardware on this sysclk
                 * is known.
                 */
                if (clk->maxrate) {
                        ratio = DIV_ROUND_CLOSEST(input, rate);
                        if (input / ratio > clk->maxrate)
                                ratio = 0;
                }

                if (ratio == 0)
                        ratio = DIV_ROUND_UP(input, rate);

                ratio--;
        }

        if (ratio > pll->div_ratio_mask)
                return -EINVAL;

        do {
                v = __raw_readl(pll->base + PLLSTAT);
        } while (v & PLLSTAT_GOSTAT);

        v = __raw_readl(pll->base + clk->div_reg);
        v &= ~pll->div_ratio_mask;
        v |= ratio | PLLDIV_EN;
        __raw_writel(v, pll->base + clk->div_reg);

        v = __raw_readl(pll->base + PLLCMD);
        v |= PLLCMD_GOSET;
        __raw_writel(v, pll->base + PLLCMD);

        do {
                v = __raw_readl(pll->base + PLLSTAT);
        } while (v & PLLSTAT_GOSTAT);

        return 0;
}
EXPORT_SYMBOL(davinci_set_sysclk_rate);
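
/*
 * Illustrative sketch only: SoC code normally plugs this helper in as the
 * .set_rate hook of a SYSCLK, so a plain clk_set_rate() call ends up
 * reprogramming the corresponding PLLDIVn register.  The clock name and
 * the PLLDIV1 divider register chosen below are assumptions.
 */
static struct clk example_sysclk __maybe_unused = {
        .name           = "pll0_sysclk1",       /* assumed name */
        .flags          = CLK_PLL,
        .div_reg        = PLLDIV1,              /* assumed divider register */
        .set_rate       = davinci_set_sysclk_rate,
};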

static unsigned long clk_leafclk_recalc(struct clk *clk)
{
        if (WARN_ON(!clk->parent))
                return clk->rate;

        return clk->parent->rate;
}

static unsigned long clk_pllclk_recalc(struct clk *clk)
{
        u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
        u8 bypass;
        struct pll_data *pll = clk->pll_data;
        unsigned long rate = clk->rate;

        ctrl = __raw_readl(pll->base + PLLCTL);
        rate = pll->input_rate = clk->parent->rate;

        if (ctrl & PLLCTL_PLLEN) {
                bypass = 0;
                mult = __raw_readl(pll->base + PLLM);
                if (cpu_is_davinci_dm365())
                        mult = 2 * (mult & PLLM_PLLM_MASK);
                else
                        mult = (mult & PLLM_PLLM_MASK) + 1;
        } else
                bypass = 1;

        if (pll->flags & PLL_HAS_PREDIV) {
                prediv = __raw_readl(pll->base + PREDIV);
                if (prediv & PLLDIV_EN)
                        prediv = (prediv & pll->div_ratio_mask) + 1;
                else
                        prediv = 1;
        }

        /* pre-divider is fixed, but (some?) chips won't report that */
        if (cpu_is_davinci_dm355() && pll->num == 1)
                prediv = 8;

        if (pll->flags & PLL_HAS_POSTDIV) {
                postdiv = __raw_readl(pll->base + POSTDIV);
                if (postdiv & PLLDIV_EN)
                        postdiv = (postdiv & pll->div_ratio_mask) + 1;
                else
                        postdiv = 1;
        }

        if (!bypass) {
                rate /= prediv;
                rate *= mult;
                rate /= postdiv;
        }

        pr_debug("PLL%d: input = %lu MHz [ ",
                        pll->num, clk->parent->rate / 1000000);
        if (bypass)
                pr_debug("bypass ");
        if (prediv > 1)
                pr_debug("/ %d ", prediv);
        if (mult > 1)
                pr_debug("* %d ", mult);
        if (postdiv > 1)
                pr_debug("/ %d ", postdiv);
        pr_debug("] --> %lu MHz output.\n", rate / 1000000);

        return rate;
}

/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
                                        unsigned int mult, unsigned int postdiv)
{
        u32 ctrl;
        unsigned int locktime;
        unsigned long flags;

        if (pll->base == NULL)
                return -EINVAL;

        /*
         * PLL lock time required per OMAP-L138 datasheet is
         * (2000 * prediv) / sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
         * as 4 and OSCIN cycle as 25 MHz, which works out to
         * (2000 * prediv) / 100 usec.
         */
        if (prediv) {
                locktime = ((2000 * prediv) / 100);
                prediv = (prediv - 1) | PLLDIV_EN;
        } else {
                locktime = PLL_LOCK_TIME;
        }
        if (postdiv)
                postdiv = (postdiv - 1) | PLLDIV_EN;
        if (mult)
                mult = mult - 1;

        /* Protect against simultaneous calls to PLL setting sequence */
        spin_lock_irqsave(&clockfw_lock, flags);

        ctrl = __raw_readl(pll->base + PLLCTL);

        /* Switch the PLL to bypass mode */
        ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
        __raw_writel(ctrl, pll->base + PLLCTL);

        udelay(PLL_BYPASS_TIME);

        /* Reset and enable PLL */
        ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
        __raw_writel(ctrl, pll->base + PLLCTL);

        if (pll->flags & PLL_HAS_PREDIV)
                __raw_writel(prediv, pll->base + PREDIV);

        __raw_writel(mult, pll->base + PLLM);

        if (pll->flags & PLL_HAS_POSTDIV)
                __raw_writel(postdiv, pll->base + POSTDIV);

        udelay(PLL_RESET_TIME);

        /* Bring PLL out of reset */
        ctrl |= PLLCTL_PLLRST;
        __raw_writel(ctrl, pll->base + PLLCTL);

        udelay(locktime);

        /* Remove PLL from bypass mode */
        ctrl |= PLLCTL_PLLEN;
        __raw_writel(ctrl, pll->base + PLLCTL);

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);
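
/*
 * Illustrative sketch only: with an assumed 24 MHz reference, prediv = 1,
 * mult = 25 and postdiv = 2 would give 24 / 1 * 25 / 2 = 300 MHz, matching
 * the arithmetic in clk_pllclk_recalc().  The pll pointer is assumed to
 * belong to a registered PLL base clock.
 */
static int __maybe_unused davinci_pllrate_example(struct pll_data *pll)
{
        return davinci_set_pllrate(pll, 1, 25, 2);      /* -> 300 MHz */
}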

int __init davinci_clk_init(struct clk_lookup *clocks)
{
        struct clk_lookup *c;
        struct clk *clk;
        size_t num_clocks = 0;

        for (c = clocks; c->clk; c++) {
                clk = c->clk;

                if (!clk->recalc) {

                        /* Check if clock is a PLL */
                        if (clk->pll_data)
                                clk->recalc = clk_pllclk_recalc;

                        /* Else, if it is a PLL-derived clock */
                        else if (clk->flags & CLK_PLL)
                                clk->recalc = clk_sysclk_recalc;

                        /* Otherwise, it is a leaf clock (PSC clock) */
                        else if (clk->parent)
                                clk->recalc = clk_leafclk_recalc;
                }

                if (clk->pll_data) {
                        struct pll_data *pll = clk->pll_data;

                        if (!pll->div_ratio_mask)
                                pll->div_ratio_mask = PLLDIV_RATIO_MASK;

                        if (pll->phys_base && !pll->base) {
                                pll->base = ioremap(pll->phys_base, SZ_4K);
                                WARN_ON(!pll->base);
                        }
                }

                if (clk->recalc)
                        clk->rate = clk->recalc(clk);

                if (clk->lpsc)
                        clk->flags |= CLK_PSC;

                clk_register(clk);
                num_clocks++;

                /* Turn on clocks that Linux doesn't otherwise manage */
                if (clk->flags & ALWAYS_ENABLED)
                        clk_enable(clk);
        }

        clkdev_add_table(clocks, num_clocks);

        return 0;
}
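
/*
 * Illustrative sketch only: a minimal lookup table as SoC init code might
 * pass it to davinci_clk_init().  The "ref" clock and its 24 MHz rate are
 * assumptions; the table must end with a NULL .clk sentinel because the
 * loop above stops at the first entry without a clock.
 */
static struct clk example_ref_clk __maybe_unused = {
        .name   = "ref",
        .rate   = 24000000,             /* assumed 24 MHz oscillator */
};

static struct clk_lookup example_clk_table[] __maybe_unused = {
        { .con_id = "ref", .clk = &example_ref_clk },
        { .clk = NULL },                /* sentinel */
};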

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define CLKNAME_MAX     10              /* longest clock name */
#define NEST_DELTA      2
#define NEST_MAX        4

static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
        char            *state;
        char            buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
        struct clk      *clk;
        unsigned        i;

        if (parent->flags & CLK_PLL)
                state = "pll";
        else if (parent->flags & CLK_PSC)
                state = "psc";
        else
                state = "";

        /* <nest spaces> name <pad to end> */
        memset(buf, ' ', sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = 0;
        i = strlen(parent->name);
        memcpy(buf + nest, parent->name,
                        min(i, (unsigned)(sizeof(buf) - 1 - nest)));

        seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
                        buf, parent->usecount, state, clk_get_rate(parent));

        /* REVISIT show device associations too */

        /* cost is now small, but not linear... */
        list_for_each_entry(clk, &parent->children, childnode) {
                dump_clock(s, nest + NEST_DELTA, clk);
        }
}

static int davinci_ck_show(struct seq_file *m, void *v)
{
        struct clk *clk;

        /*
         * Show clock tree; We trust nonzero usecounts equate to PSC enables...
         */
        mutex_lock(&clocks_mutex);
        list_for_each_entry(clk, &clocks, node)
                if (!clk->parent)
                        dump_clock(m, 0, clk);
        mutex_unlock(&clocks_mutex);

        return 0;
}

static int davinci_ck_open(struct inode *inode, struct file *file)
{
        return single_open(file, davinci_ck_show, NULL);
}

static const struct file_operations davinci_ck_operations = {
        .open           = davinci_ck_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init davinci_clk_debugfs_init(void)
{
        debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
                        &davinci_ck_operations);
        return 0;
}
device_initcall(davinci_clk_debugfs_init);
#endif /* CONFIG_DEBUG_FS */