// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-omap1/clock.c
 *
 * Copyright (C) 2004 - 2005, 2009 - 2010 Nokia Corporation
 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified to use omap shared clock framework by
 * Tony Lindgren <tony@atomide.com>
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/soc/ti/omap1-io.h>
#include <linux/spinlock.h>

#include <asm/mach-types.h>

#include "hardware.h"
#include "soc.h"
#include "iomap.h"
#include "clock.h"
#include "opp.h"
#include "sram.h"

__u32 arm_idlect1_mask;

/* provide direct internal access (not via clk API) to some clocks */
struct omap1_clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

/* protect registers shared among clk_enable/disable() and clk_set_rate() operations */
static DEFINE_SPINLOCK(arm_ckctl_lock);
static DEFINE_SPINLOCK(arm_idlect2_lock);
static DEFINE_SPINLOCK(mod_conf_ctrl_0_lock);
static DEFINE_SPINLOCK(mod_conf_ctrl_1_lock);
static DEFINE_SPINLOCK(swd_clk_div_ctrl_sel_lock);

/*
 * Omap1 specific clock functions
 */

unsigned long omap1_uart_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	unsigned int val = __raw_readl(clk->enable_reg);

	return val & 1 << clk->enable_bit ? 48000000 : 12000000;
}

unsigned long omap1_sossi_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return p_rate / div;
}

static void omap1_clk_allow_idle(struct omap1_clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct omap1_clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}

static __u16 verify_ckctl_value(__u16 newval)
{
	/* This function checks for the following limitations set
	 * by the hardware (all conditions must be true):
	 *   DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 *   ARM_CK >= TC_CK
	 *   DSP_CK >= TC_CK
	 *   DSPMMU_CK >= TC_CK
	 *
	 * In addition, the following rules are enforced:
	 *   LCD_CK <= TC_CK
	 *   ARMPER_CK <= TC_CK
	 *
	 * However, maximum frequencies are not checked for!
	 */
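	/* For example (divisors are encoded as 2-bit exponents, a bigger
	 * exponent means a slower clock): a requested ARM_CK divisor of 4
	 * (arm_exp = 2) combined with a TC_CK divisor of 2 (tc_exp = 1)
	 * would violate ARM_CK >= TC_CK, so tc_exp is raised to 2 below,
	 * and the LCD_CK and ARMPER_CK exponents are then raised to at
	 * least that value as well.
	 */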
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	newval &= 0xf000;
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}

static int calc_dsor_exp(unsigned long rate, unsigned long realrate)
{
	/* Note: If the target frequency is too low, this function will return 4,
	 * which is an invalid value. The caller must check for this value and act
	 * accordingly.
	 *
	 * Note: This function does not check for the following limitations set
	 * by the hardware (all conditions must be true):
	 *   DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 *   ARM_CK >= TC_CK
	 *   DSP_CK >= TC_CK
	 *   DSPMMU_CK >= TC_CK
	 */
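	/* For example, with realrate (parent rate) = 192 MHz and a requested
	 * rate of 48 MHz, the loop below halves realrate twice (192 -> 96 -> 48)
	 * before realrate <= rate holds, so the returned exponent is 2,
	 * i.e. a divisor of 4.
	 */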
	unsigned dsor_exp;

	if (unlikely(realrate == 0))
		return -EIO;

	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}

unsigned long omap1_ckctl_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	/* Calculate divisor encoded as 2-bit exponent */
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	/* update locally maintained rate, required by arm_ck for omap1_show_rates() */
	clk->rate = p_rate / dsor;

	return clk->rate;
}

static int omap1_clk_is_enabled(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw);
	bool api_ck_was_enabled = true;
	__u32 regval32;
	int ret;

	if (!clk->ops)	/* no gate -- always enabled */
		return 1;

	if (clk->ops == &clkops_dspck) {
		api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
		if (!api_ck_was_enabled)
			if (api_ck_p->ops->enable(api_ck_p) < 0)
				return 0;
	}

	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	ret = regval32 & (1 << clk->enable_bit);

	if (!api_ck_was_enabled)
		api_ck_p->ops->disable(api_ck_p);

	return ret;
}

unsigned long omap1_ckctl_recalc_dsp_domain(struct omap1_clk *clk, unsigned long p_rate)
{
	bool api_ck_was_enabled;
	int dsor;

	/* Calculate divisor encoded as 2-bit exponent
	 *
	 * The clock control bits are in the DSP domain,
	 * so api_ck is needed for access.
	 * Note that DSP_CKCTL virt addr = phys addr, so
	 * we must use __raw_readw() instead of omap_readw().
	 */
	api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
	if (!api_ck_was_enabled)
		api_ck_p->ops->enable(api_ck_p);

	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));

	if (!api_ck_was_enabled)
		api_ck_p->ops->disable(api_ck_p);

	return p_rate / dsor;
}

/* MPU virtual clock functions */
int omap1_select_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	/* Find the highest supported frequency <= rate and switch to it */
	struct mpu_rate *ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	/*
	 * In most cases we should not need to reprogram the DPLL.
	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
	 */
	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}

int omap1_clk_set_rate_dsp_domain(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(rate, p_rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = p_rate / (1 << dsor_exp);

	return 0;
}

long omap1_clk_round_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate,
				    unsigned long *p_rate)
{
	int dsor_exp = calc_dsor_exp(rate, *p_rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;

	return *p_rate / (1 << dsor_exp);
}

int omap1_clk_set_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	unsigned long flags;
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(rate, p_rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	/* protect ARM_CKCTL register from concurrent access via clk_enable/disable() */
	spin_lock_irqsave(&arm_ckctl_lock, flags);

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = p_rate / (1 << dsor_exp);

	spin_unlock_irqrestore(&arm_ckctl_lock, flags);

	return 0;
}

long omap1_round_to_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
	/* Find the highest supported frequency <= rate */
	struct mpu_rate *ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}

static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	/* MCLK and BCLK divisor selection is not linear:
	 *
	 * freq = 96MHz / dsor
	 *
	 * RATIO_SEL range: dsor <-> RATIO_SEL
	 * 0..6:  (RATIO_SEL + 2) <-> (dsor - 2)
	 * 6..48: (8 + (RATIO_SEL - 6) * 2) <-> ((dsor - 8) / 2 + 6)
	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
	 * cannot be used.
	 */
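	/* For example, a requested rate of 12 MHz picks dsor = 8
	 * (96 MHz / 8 = 12 MHz), while 10 MHz picks dsor = 10: the odd
	 * divisor 9 is skipped and 96 MHz / 10 = 9.6 MHz <= 10 MHz.
	 */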
	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;

		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}

/* XXX Only needed on 1510 */
long omap1_round_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
	return rate > 24000000 ? 48000000 : 12000000;
}

int omap1_set_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	unsigned long flags;
	unsigned int val;

	if (rate == 12000000)
		val = 0;
	else if (rate == 48000000)
		val = 1 << clk->enable_bit;
	else
		return -EINVAL;

	/* protect MOD_CONF_CTRL_0 register from concurrent access via clk_enable/disable() */
	spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);

	val |= __raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit);
	__raw_writel(val, clk->enable_reg);

	spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);

	clk->rate = rate;

	return 0;
}

/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	unsigned long flags;
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	/* protect SWD_CLK_DIV_CTRL_SEL register from concurrent access via clk_enable/disable() */
	spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);

	return 0;
}
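
/*
 * calc_div_sossi() below computes the SoSSI divider field value, which
 * encodes (actual divisor - 1), rounding towards the slower frequency.
 * For example, p_rate = 96 MHz and rate = 20 MHz give a return value of 4,
 * i.e. an actual divisor of 5 (19.2 MHz).
 */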
static int calc_div_sossi(unsigned long rate, unsigned long p_rate)
{
	int div;

	/* Round towards slower frequency */
	div = (p_rate + rate - 1) / rate;

	return --div;
}

long omap1_round_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
	int div;

	div = calc_div_sossi(rate, *p_rate);
	if (div < 0)
		div = 0;
	else if (div > 7)
		div = 7;

	return *p_rate / (div + 1);
}

int omap1_set_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	unsigned long flags;
	u32 l;
	int div;

	div = calc_div_sossi(rate, p_rate);
	if (div < 0 || div > 7)
		return -EINVAL;

	/* protect MOD_CONF_CTRL_1 register from concurrent access via clk_enable/disable() */
	spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);

	return 0;
}

long omap1_round_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

int omap1_init_ext_clk(struct omap1_clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	/* Determine current rate and ensure clock is based on 96MHz APLL */
	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;

	return 0;
}

static int omap1_clk_enable(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
	int ret = 0;

	if (parent && clk->flags & CLOCK_NO_IDLE_PARENT)
		omap1_clk_deny_idle(parent);

	if (clk->ops && !(WARN_ON(!clk->ops->enable)))
		ret = clk->ops->enable(clk);

	return ret;
}

static void omap1_clk_disable(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));

	if (clk->ops && !(WARN_ON(!clk->ops->disable)))
		clk->ops->disable(clk);

	if (likely(parent) && clk->flags & CLOCK_NO_IDLE_PARENT)
		omap1_clk_allow_idle(parent);
}

static int omap1_clk_enable_generic(struct omap1_clk *clk)
{
	unsigned long flags;
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk_hw_get_name(&clk->hw));
		return -EINVAL;
	}

	/* protect clk->enable_reg from concurrent access via clk_set_rate() */
	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
		spin_lock_irqsave(&arm_ckctl_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
		spin_lock_irqsave(&arm_idlect2_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
		spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
		spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
		spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
		spin_unlock_irqrestore(&arm_ckctl_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
		spin_unlock_irqrestore(&arm_idlect2_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
		spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
		spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
		spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);

	return 0;
}

static void omap1_clk_disable_generic(struct omap1_clk *clk)
{
	unsigned long flags;
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	/* protect clk->enable_reg from concurrent access via clk_set_rate() */
	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
		spin_lock_irqsave(&arm_ckctl_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
		spin_lock_irqsave(&arm_idlect2_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
		spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
		spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
		spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
		spin_unlock_irqrestore(&arm_ckctl_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
		spin_unlock_irqrestore(&arm_idlect2_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
		spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
		spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
		spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
}

const struct clkops clkops_generic = {
	.enable		= omap1_clk_enable_generic,
	.disable	= omap1_clk_disable_generic,
};

static int omap1_clk_enable_dsp_domain(struct omap1_clk *clk)
{
	bool api_ck_was_enabled;
	int retval = 0;

	api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
	if (!api_ck_was_enabled)
		retval = api_ck_p->ops->enable(api_ck_p);

	if (!retval) {
		retval = omap1_clk_enable_generic(clk);

		if (!api_ck_was_enabled)
			api_ck_p->ops->disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct omap1_clk *clk)
{
	bool api_ck_was_enabled;

	api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
	if (!api_ck_was_enabled)
		if (api_ck_p->ops->enable(api_ck_p) < 0)
			return;

	omap1_clk_disable_generic(clk);

	if (!api_ck_was_enabled)
		api_ck_p->ops->disable(api_ck_p);
}

const struct clkops clkops_dspck = {
	.enable		= omap1_clk_enable_dsp_domain,
	.disable	= omap1_clk_disable_dsp_domain,
};

/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct omap1_clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		/* Set smart idle acknowledgement mode */
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			    uclk->sysc_addr);
	}

	return ret;
}

/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct omap1_clk *clk)
{
	struct uart_clk *uclk;

	/* Set force idle acknowledgement mode */
	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
	.enable		= omap1_clk_enable_uart_functional_16xx,
	.disable	= omap1_clk_disable_uart_functional_16xx,
};

static unsigned long omap1_clk_recalc_rate(struct clk_hw *hw, unsigned long p_rate)
{
	struct omap1_clk *clk = to_omap1_clk(hw);

	if (clk->recalc)
		return clk->recalc(clk, p_rate);

	return clk->rate;
}

static long omap1_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *p_rate)
{
	struct omap1_clk *clk = to_omap1_clk(hw);

	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate, p_rate);

	return omap1_clk_recalc_rate(hw, *p_rate);
}

static int omap1_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long p_rate)
{
	struct omap1_clk *clk = to_omap1_clk(hw);
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate, p_rate);

	return ret;
}

/*
 * Omap1 clock reset and init functions
 */

static int omap1_clk_init_op(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw);

	if (clk->init)
		return clk->init(clk);

	return 0;
}

#ifdef CONFIG_OMAP_RESET_CLOCKS

static void omap1_clk_disable_unused(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw);
	const char *name = clk_hw_get_name(hw);

	/* Clocks in the DSP domain need api_ck. Just assume the bootloader
	 * has not enabled any DSP clocks */
	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n", name);
		return;
	}

	pr_info("Disabling unused clock \"%s\"... ", name);
	omap1_clk_disable(hw);
	printk(" done\n");
}

#endif

const struct clk_ops omap1_clk_gate_ops = {
	.enable		= omap1_clk_enable,
	.disable	= omap1_clk_disable,
	.is_enabled	= omap1_clk_is_enabled,
#ifdef CONFIG_OMAP_RESET_CLOCKS
	.disable_unused	= omap1_clk_disable_unused,
#endif
};

const struct clk_ops omap1_clk_rate_ops = {
	.recalc_rate	= omap1_clk_recalc_rate,
	.round_rate	= omap1_clk_round_rate,
	.set_rate	= omap1_clk_set_rate,
	.init		= omap1_clk_init_op,
};

const struct clk_ops omap1_clk_full_ops = {
	.enable		= omap1_clk_enable,
	.disable	= omap1_clk_disable,
	.is_enabled	= omap1_clk_is_enabled,
#ifdef CONFIG_OMAP_RESET_CLOCKS
	.disable_unused	= omap1_clk_disable_unused,
#endif
	.recalc_rate	= omap1_clk_recalc_rate,
	.round_rate	= omap1_clk_round_rate,
	.set_rate	= omap1_clk_set_rate,
	.init		= omap1_clk_init_op,
};

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	return p_rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	WARN_ON(!clk->fixed_div);

	return p_rate / clk->fixed_div;
}

/* Propagate rate to children */
void propagate_rate(struct omap1_clk *tclk)
{
	struct clk *clkp;

	/* depend on CCF ability to recalculate new rates across the whole clock subtree */
	if (WARN_ON(!(clk_hw_get_flags(&tclk->hw) & CLK_GET_RATE_NOCACHE)))
		return;

	clkp = clk_get_sys(NULL, clk_hw_get_name(&tclk->hw));
	if (WARN_ON(!clkp))
		return;

	clk_get_rate(clkp);
	clk_put(clkp);
}

const struct clk_ops omap1_clk_null_ops = {
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct omap1_clk dummy_ck __refdata = {
	.hw.init	= CLK_HW_INIT_NO_PARENT("dummy", &omap1_clk_null_ops, 0),
};