/*
 * Copyright (C) 2015 Atmel Corporation,
 *               Nicolas Ferre <nicolas.ferre@atmel.com>
 *
 * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pmc.h"

/* Largest value programmable into the GCKDIV field (divisor is GCKDIV + 1). */
#define GENERATED_MAX_DIV	255

/* DT parent index reserved for the audio PLL (the only re-ratable parent). */
#define GCK_INDEX_DT_AUDIO_PLL	5
struct clk_generated {
struct clk_hw hw ;
2014-09-07 10:14:29 +04:00
struct regmap * regmap ;
2015-07-31 12:43:12 +03:00
struct clk_range range ;
2014-09-07 10:14:29 +04:00
spinlock_t * lock ;
2015-07-31 12:43:12 +03:00
u32 id ;
u32 gckdiv ;
u8 parent_id ;
2017-08-10 09:34:05 +03:00
bool audio_pll_allowed ;
2015-07-31 12:43:12 +03:00
} ;
# define to_clk_generated(hw) \
container_of ( hw , struct clk_generated , hw )
static int clk_generated_enable ( struct clk_hw * hw )
{
struct clk_generated * gck = to_clk_generated ( hw ) ;
2014-09-07 10:14:29 +04:00
unsigned long flags ;
2015-07-31 12:43:12 +03:00
pr_debug ( " GCLK: %s, gckdiv = %d, parent id = %d \n " ,
__func__ , gck - > gckdiv , gck - > parent_id ) ;
2014-09-07 10:14:29 +04:00
spin_lock_irqsave ( gck - > lock , flags ) ;
regmap_write ( gck - > regmap , AT91_PMC_PCR ,
( gck - > id & AT91_PMC_PCR_PID_MASK ) ) ;
regmap_update_bits ( gck - > regmap , AT91_PMC_PCR ,
AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK |
AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN ,
AT91_PMC_PCR_GCKCSS ( gck - > parent_id ) |
AT91_PMC_PCR_CMD |
AT91_PMC_PCR_GCKDIV ( gck - > gckdiv ) |
AT91_PMC_PCR_GCKEN ) ;
spin_unlock_irqrestore ( gck - > lock , flags ) ;
2015-07-31 12:43:12 +03:00
return 0 ;
}
static void clk_generated_disable ( struct clk_hw * hw )
{
struct clk_generated * gck = to_clk_generated ( hw ) ;
2014-09-07 10:14:29 +04:00
unsigned long flags ;
spin_lock_irqsave ( gck - > lock , flags ) ;
regmap_write ( gck - > regmap , AT91_PMC_PCR ,
( gck - > id & AT91_PMC_PCR_PID_MASK ) ) ;
regmap_update_bits ( gck - > regmap , AT91_PMC_PCR ,
AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN ,
AT91_PMC_PCR_CMD ) ;
spin_unlock_irqrestore ( gck - > lock , flags ) ;
2015-07-31 12:43:12 +03:00
}
static int clk_generated_is_enabled ( struct clk_hw * hw )
{
struct clk_generated * gck = to_clk_generated ( hw ) ;
2014-09-07 10:14:29 +04:00
unsigned long flags ;
unsigned int status ;
2015-07-31 12:43:12 +03:00
2014-09-07 10:14:29 +04:00
spin_lock_irqsave ( gck - > lock , flags ) ;
regmap_write ( gck - > regmap , AT91_PMC_PCR ,
( gck - > id & AT91_PMC_PCR_PID_MASK ) ) ;
regmap_read ( gck - > regmap , AT91_PMC_PCR , & status ) ;
spin_unlock_irqrestore ( gck - > lock , flags ) ;
2015-07-31 12:43:12 +03:00
2014-09-07 10:14:29 +04:00
return status & AT91_PMC_PCR_GCKEN ? 1 : 0 ;
2015-07-31 12:43:12 +03:00
}
static unsigned long
clk_generated_recalc_rate ( struct clk_hw * hw ,
unsigned long parent_rate )
{
struct clk_generated * gck = to_clk_generated ( hw ) ;
return DIV_ROUND_CLOSEST ( parent_rate , gck - > gckdiv + 1 ) ;
}
2017-08-10 09:34:04 +03:00
static void clk_generated_best_diff ( struct clk_rate_request * req ,
struct clk_hw * parent ,
unsigned long parent_rate , u32 div ,
int * best_diff , long * best_rate )
{
unsigned long tmp_rate ;
int tmp_diff ;
if ( ! div )
tmp_rate = parent_rate ;
else
tmp_rate = parent_rate / div ;
tmp_diff = abs ( req - > rate - tmp_rate ) ;
if ( * best_diff < 0 | | * best_diff > tmp_diff ) {
* best_rate = tmp_rate ;
* best_diff = tmp_diff ;
req - > best_parent_rate = parent_rate ;
req - > best_parent_hw = parent ;
}
}
2015-07-31 12:43:12 +03:00
static int clk_generated_determine_rate ( struct clk_hw * hw ,
struct clk_rate_request * req )
{
struct clk_generated * gck = to_clk_generated ( hw ) ;
struct clk_hw * parent = NULL ;
2017-08-10 09:34:05 +03:00
struct clk_rate_request req_parent = * req ;
2015-07-31 12:43:12 +03:00
long best_rate = - EINVAL ;
2017-08-10 09:34:05 +03:00
unsigned long min_rate , parent_rate ;
2015-07-31 12:43:12 +03:00
int best_diff = - 1 ;
int i ;
2017-08-10 09:34:05 +03:00
u32 div ;
2015-07-31 12:43:12 +03:00
2017-08-10 09:34:05 +03:00
for ( i = 0 ; i < clk_hw_get_num_parents ( hw ) - 1 ; i + + ) {
2015-07-31 12:43:12 +03:00
parent = clk_hw_get_parent_by_index ( hw , i ) ;
if ( ! parent )
continue ;
parent_rate = clk_hw_get_rate ( parent ) ;
min_rate = DIV_ROUND_CLOSEST ( parent_rate , GENERATED_MAX_DIV + 1 ) ;
if ( ! parent_rate | |
( gck - > range . max & & min_rate > gck - > range . max ) )
continue ;
2017-08-10 09:34:01 +03:00
div = DIV_ROUND_CLOSEST ( parent_rate , req - > rate ) ;
2017-08-10 09:34:04 +03:00
clk_generated_best_diff ( req , parent , parent_rate , div ,
& best_diff , & best_rate ) ;
2017-08-10 09:34:05 +03:00
if ( ! best_diff )
break ;
}
/*
* The audio_pll rate can be modified , unlike the five others clocks
* that should never be altered .
* The audio_pll can technically be used by multiple consumers . However ,
* with the rate locking , the first consumer to enable to clock will be
* the one definitely setting the rate of the clock .
* Since audio IPs are most likely to request the same rate , we enforce
* that the only clks able to modify gck rate are those of audio IPs .
*/
if ( ! gck - > audio_pll_allowed )
goto end ;
parent = clk_hw_get_parent_by_index ( hw , GCK_INDEX_DT_AUDIO_PLL ) ;
if ( ! parent )
goto end ;
for ( div = 1 ; div < GENERATED_MAX_DIV + 2 ; div + + ) {
req_parent . rate = req - > rate * div ;
__clk_determine_rate ( parent , & req_parent ) ;
clk_generated_best_diff ( req , parent , req_parent . rate , div ,
& best_diff , & best_rate ) ;
2015-07-31 12:43:12 +03:00
if ( ! best_diff )
break ;
}
2017-08-10 09:34:05 +03:00
end :
2015-07-31 12:43:12 +03:00
pr_debug ( " GCLK: %s, best_rate = %ld, parent clk: %s @ %ld \n " ,
__func__ , best_rate ,
__clk_get_name ( ( req - > best_parent_hw ) - > clk ) ,
req - > best_parent_rate ) ;
if ( best_rate < 0 )
return best_rate ;
req - > rate = best_rate ;
return 0 ;
}
/* No modification of hardware as we have the flag CLK_SET_PARENT_GATE set */
static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_generated *gck = to_clk_generated(hw);

	/* Reject out-of-range selectors; hardware is programmed at enable. */
	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	gck->parent_id = index;

	return 0;
}
static u8 clk_generated_get_parent ( struct clk_hw * hw )
{
struct clk_generated * gck = to_clk_generated ( hw ) ;
return gck - > parent_id ;
}
/* No modification of hardware as we have the flag CLK_SET_RATE_GATE set */
static int clk_generated_set_rate ( struct clk_hw * hw ,
unsigned long rate ,
unsigned long parent_rate )
{
struct clk_generated * gck = to_clk_generated ( hw ) ;
u32 div ;
if ( ! rate )
return - EINVAL ;
if ( gck - > range . max & & rate > gck - > range . max )
return - EINVAL ;
div = DIV_ROUND_CLOSEST ( parent_rate , rate ) ;
if ( div > GENERATED_MAX_DIV + 1 | | ! div )
return - EINVAL ;
gck - > gckdiv = div - 1 ;
return 0 ;
}
/* Common clock framework operations for generated clocks. */
static const struct clk_ops generated_ops = {
	.enable = clk_generated_enable,
	.disable = clk_generated_disable,
	.is_enabled = clk_generated_is_enabled,
	.recalc_rate = clk_generated_recalc_rate,
	.determine_rate = clk_generated_determine_rate,
	.get_parent = clk_generated_get_parent,
	.set_parent = clk_generated_set_parent,
	.set_rate = clk_generated_set_rate,
};
/**
* clk_generated_startup - Initialize a given clock to its default parent and
* divisor parameter .
*
* @ gck : Generated clock to set the startup parameters for .
*
* Take parameters from the hardware and update local clock configuration
* accordingly .
*/
static void clk_generated_startup ( struct clk_generated * gck )
{
u32 tmp ;
2014-09-07 10:14:29 +04:00
unsigned long flags ;
2015-07-31 12:43:12 +03:00
2014-09-07 10:14:29 +04:00
spin_lock_irqsave ( gck - > lock , flags ) ;
regmap_write ( gck - > regmap , AT91_PMC_PCR ,
( gck - > id & AT91_PMC_PCR_PID_MASK ) ) ;
regmap_read ( gck - > regmap , AT91_PMC_PCR , & tmp ) ;
spin_unlock_irqrestore ( gck - > lock , flags ) ;
2015-07-31 12:43:12 +03:00
gck - > parent_id = ( tmp & AT91_PMC_PCR_GCKCSS_MASK )
> > AT91_PMC_PCR_GCKCSS_OFFSET ;
gck - > gckdiv = ( tmp & AT91_PMC_PCR_GCKDIV_MASK )
> > AT91_PMC_PCR_GCKDIV_OFFSET ;
}
2018-10-16 17:21:44 +03:00
struct clk_hw * __init
2016-06-02 00:31:22 +03:00
at91_clk_register_generated ( struct regmap * regmap , spinlock_t * lock ,
const char * name , const char * * parent_names ,
2018-10-16 17:21:43 +03:00
u8 num_parents , u8 id , bool pll_audio ,
2016-06-02 00:31:22 +03:00
const struct clk_range * range )
2015-07-31 12:43:12 +03:00
{
struct clk_generated * gck ;
struct clk_init_data init ;
2016-06-02 00:31:22 +03:00
struct clk_hw * hw ;
int ret ;
2015-07-31 12:43:12 +03:00
gck = kzalloc ( sizeof ( * gck ) , GFP_KERNEL ) ;
if ( ! gck )
return ERR_PTR ( - ENOMEM ) ;
init . name = name ;
init . ops = & generated_ops ;
init . parent_names = parent_names ;
init . num_parents = num_parents ;
2017-08-10 09:34:05 +03:00
init . flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
CLK_SET_RATE_PARENT ;
2015-07-31 12:43:12 +03:00
gck - > id = id ;
gck - > hw . init = & init ;
2014-09-07 10:14:29 +04:00
gck - > regmap = regmap ;
gck - > lock = lock ;
2015-07-31 12:43:12 +03:00
gck - > range = * range ;
2018-10-16 17:21:43 +03:00
gck - > audio_pll_allowed = pll_audio ;
2015-07-31 12:43:12 +03:00
2017-05-12 17:25:30 +03:00
clk_generated_startup ( gck ) ;
2016-06-02 00:31:22 +03:00
hw = & gck - > hw ;
ret = clk_hw_register ( NULL , & gck - > hw ) ;
if ( ret ) {
2015-07-31 12:43:12 +03:00
kfree ( gck ) ;
2016-06-02 00:31:22 +03:00
hw = ERR_PTR ( ret ) ;
2017-06-08 03:36:47 +03:00
} else {
pmc_register_id ( id ) ;
2017-06-05 01:02:57 +03:00
}
2015-07-31 12:43:12 +03:00
2016-06-02 00:31:22 +03:00
return hw ;
2015-07-31 12:43:12 +03:00
}