2019-01-22 09:31:41 +00:00
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 - 2018 NXP .
*/
2022-03-04 13:52:55 +01:00
# define pr_fmt(fmt) "pll14xx: " fmt
2022-03-04 13:52:51 +01:00
# include <linux/bitfield.h>
2020-08-05 07:17:29 +08:00
# include <linux/bits.h>
2019-01-22 09:31:41 +00:00
# include <linux/clk-provider.h>
# include <linux/err.h>
2020-07-30 09:22:51 +08:00
# include <linux/export.h>
2019-01-22 09:31:41 +00:00
# include <linux/io.h>
# include <linux/iopoll.h>
# include <linux/slab.h>
# include <linux/jiffies.h>
# include "clk.h"
# define GNRL_CTL 0x0
2022-03-04 13:52:49 +01:00
# define DIV_CTL0 0x4
# define DIV_CTL1 0x8
2019-01-22 09:31:41 +00:00
# define LOCK_STATUS BIT(31)
# define LOCK_SEL_MASK BIT(29)
# define CLKE_MASK BIT(11)
# define RST_MASK BIT(9)
# define BYPASS_MASK BIT(4)
# define MDIV_MASK GENMASK(21, 12)
# define PDIV_MASK GENMASK(9, 4)
# define SDIV_MASK GENMASK(2, 0)
# define KDIV_MASK GENMASK(15, 0)
2022-03-04 13:52:56 +01:00
# define KDIV_MIN SHRT_MIN
# define KDIV_MAX SHRT_MAX
2019-01-22 09:31:41 +00:00
# define LOCK_TIMEOUT_US 10000
/*
 * Per-instance state for one pll14xx ("Samsung"-style 1416x/1443x) PLL.
 */
struct clk_pll14xx {
	struct clk_hw hw;		/* clk framework handle; embedded so container_of recovers this struct */
	void __iomem *base;		/* MMIO base of this PLL's register block (GNRL_CTL/DIV_CTL0/DIV_CTL1) */
	enum imx_pll14xx_type type;	/* PLL_1416X (integer-only) or PLL_1443X (fractional, has kdiv) */
	const struct imx_pll14xx_rate_table *rate_table; /* optional table of pre-computed divider settings */
	int rate_count;			/* number of entries in rate_table */
};

/* Map a struct clk_hw back to the enclosing struct clk_pll14xx */
#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)
2019-10-08 15:19:08 +08:00
/*
 * Pre-computed settings for the integer (1416x) PLL.
 * Entries are (rate, mdiv, pdiv, sdiv); the 1416x has no fractional kdiv.
 * The table is kept in descending rate order — clk_pll1416x_round_rate()
 * depends on that ordering.
 */
static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
	PLL_1416X_RATE(1800000000U, 225, 3, 0),
	PLL_1416X_RATE(1600000000U, 200, 3, 0),
	PLL_1416X_RATE(1500000000U, 375, 3, 1),
	PLL_1416X_RATE(1400000000U, 350, 3, 1),
	PLL_1416X_RATE(1200000000U, 300, 3, 1),
	PLL_1416X_RATE(1000000000U, 250, 3, 1),
	PLL_1416X_RATE(800000000U,  200, 3, 1),
	PLL_1416X_RATE(750000000U,  250, 2, 2),
	PLL_1416X_RATE(700000000U,  350, 3, 2),
	PLL_1416X_RATE(640000000U,  320, 3, 2),
	PLL_1416X_RATE(600000000U,  300, 3, 2),
	PLL_1416X_RATE(320000000U,  160, 3, 2),
};
2019-10-08 15:19:08 +08:00
/*
 * Pre-computed settings for the fractional (1443x) PLL.
 * Entries are (rate, mdiv, pdiv, sdiv, kdiv). These serve as preferred
 * exact matches in imx_pll14xx_calc_settings() before it falls back to
 * computing dividers from scratch.
 */
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
	PLL_1443X_RATE(1039500000U, 173, 2, 1, 16384),
	PLL_1443X_RATE(650000000U,  325, 3, 2, 0),
	PLL_1443X_RATE(594000000U,  198, 2, 2, 0),
	PLL_1443X_RATE(519750000U,  173, 2, 2, 16384),
};
/* Generic 1443x (fractional) PLL description, shared by platform clock drivers */
struct imx_pll14xx_clk imx_1443x_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1443x_pll);
2019-09-06 09:34:05 -04:00
2019-11-22 23:45:01 +02:00
/*
 * 1443x PLL description for the DRAM PLL. CLK_GET_RATE_NOCACHE forces a
 * hardware re-read on every rate query, since the DRAM rate can be changed
 * behind the kernel's back (e.g. by firmware during DDR frequency scaling).
 */
struct imx_pll14xx_clk imx_1443x_dram_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
	.flags = CLK_GET_RATE_NOCACHE,
};
EXPORT_SYMBOL_GPL(imx_1443x_dram_pll);
2019-11-22 23:45:01 +02:00
2019-09-06 09:34:05 -04:00
/* Generic 1416x (integer) PLL description, shared by platform clock drivers */
struct imx_pll14xx_clk imx_1416x_pll = {
	.type = PLL_1416X,
	.rate_table = imx_pll1416x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1416x_pll);
2019-09-06 09:34:05 -04:00
2019-01-22 09:31:41 +00:00
static const struct imx_pll14xx_rate_table * imx_get_pll_settings (
struct clk_pll14xx * pll , unsigned long rate )
{
const struct imx_pll14xx_rate_table * rate_table = pll - > rate_table ;
int i ;
for ( i = 0 ; i < pll - > rate_count ; i + + )
if ( rate = = rate_table [ i ] . rate )
return & rate_table [ i ] ;
return NULL ;
}
2022-03-04 13:52:52 +01:00
/*
 * Compute the output frequency for a given divider set:
 *
 *	fout = (m * 65536 + k) * Fin / (p * 65536 * 2^s)
 *
 * The 64-bit intermediate avoids overflow of prate * (m * 65536 + k).
 */
static long pll14xx_calc_rate(struct clk_pll14xx *pll, int mdiv, int pdiv,
			      int sdiv, int kdiv, unsigned long prate)
{
	u64 fout = prate;
	int divider = (pdiv * 65536) << sdiv;

	fout *= (mdiv * 65536 + kdiv);
	do_div(fout, divider);

	return fout;
}
2022-03-04 13:52:56 +01:00
/*
 * Compute the fractional divider needed to hit @rate given fixed m/p/s:
 *
 *	k = round(rate * pdiv * 65536 * 2^sdiv / prate) - mdiv * 65536
 *
 * clamped to the signed 16-bit range of the KDIV register field.
 */
static long pll1443x_calc_kdiv(int mdiv, int pdiv, int sdiv,
		unsigned long rate, unsigned long prate)
{
	long k;

	/* adding prate/2 before dividing rounds to nearest instead of down */
	k = (rate * ((pdiv * 65536) << sdiv) + prate / 2) / prate;
	k -= mdiv * 65536;

	return clamp_t(short, k, KDIV_MIN, KDIV_MAX);
}
/*
 * Fill @t with divider settings that best approximate @rate from @prate.
 *
 * Three strategies are tried in order of preference:
 *  1. an exact match from the static rate table;
 *  2. adjusting only kdiv, which the hardware can do glitch-free without
 *     a PLL reset, if the current mdiv/pdiv/sdiv can reach @rate;
 *  3. an exhaustive search over the legal pdiv/sdiv space, picking the
 *     combination with the smallest absolute frequency error.
 */
static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rate,
				      unsigned long prate, struct imx_pll14xx_rate_table *t)
{
	u32 pll_div_ctl0, pll_div_ctl1;
	int mdiv, pdiv, sdiv, kdiv;
	long fvco, rate_min, rate_max, dist, best = LONG_MAX;
	const struct imx_pll14xx_rate_table *tt;

	/*
	 * Fractional PLL constrains:
	 *
	 * a) 1 <= p <= 63
	 * b) 64 <= m <= 1023
	 * c) 0 <= s <= 6
	 * d) -32768 <= k <= 32767
	 *
	 * fvco = (m * 65536 + k) * prate / (p * 65536)
	 */

	/* First try if we can get the desired rate from one of the static entries */
	tt = imx_get_pll_settings(pll, rate);
	if (tt) {
		pr_debug("%s: in=%ld, want=%ld, Using PLL setting from table\n",
			 clk_hw_get_name(&pll->hw), prate, rate);
		t->rate = tt->rate;
		t->mdiv = tt->mdiv;
		t->pdiv = tt->pdiv;
		t->sdiv = tt->sdiv;
		t->kdiv = tt->kdiv;
		return;
	}

	/* Read the dividers currently programmed in hardware */
	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
	mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
	pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
	sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);
	pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);

	/* Then see if we can get the desired rate by only adjusting kdiv (glitch free) */
	rate_min = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MIN, prate);
	rate_max = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MAX, prate);

	if (rate >= rate_min && rate <= rate_max) {
		kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
		pr_debug("%s: in=%ld, want=%ld Only adjust kdiv %ld -> %d\n",
			 clk_hw_get_name(&pll->hw), prate, rate,
			 FIELD_GET(KDIV_MASK, pll_div_ctl1), kdiv);
		fvco = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
		t->rate = (unsigned int)fvco;
		t->mdiv = mdiv;
		t->pdiv = pdiv;
		t->sdiv = sdiv;
		t->kdiv = kdiv;
		return;
	}

	/* Finally calculate best values */
	for (pdiv = 1; pdiv <= 63; pdiv++) {
		for (sdiv = 0; sdiv <= 6; sdiv++) {
			/* calc mdiv = round(rate * pdiv * 2^sdiv) / prate) */
			mdiv = DIV_ROUND_CLOSEST(rate * (pdiv << sdiv), prate);
			mdiv = clamp(mdiv, 64, 1023);
			/* kdiv absorbs the residual error left by the integer mdiv */
			kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
			fvco = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);

			/* best match */
			dist = abs((long)rate - (long)fvco);
			if (dist < best) {
				best = dist;
				t->rate = (unsigned int)fvco;
				t->mdiv = mdiv;
				t->pdiv = pdiv;
				t->sdiv = sdiv;
				t->kdiv = kdiv;

				/* exact hit — no point searching further */
				if (!dist)
					goto found;
			}
		}
	}
found:
	pr_debug("%s: in=%ld, want=%ld got=%d (pdiv=%d sdiv=%d mdiv=%d kdiv=%d)\n",
		 clk_hw_get_name(&pll->hw), prate, rate, t->rate, t->pdiv, t->sdiv,
		 t->mdiv, t->kdiv);
}
static long clk_pll1416x_round_rate ( struct clk_hw * hw , unsigned long rate ,
2019-01-22 09:31:41 +00:00
unsigned long * prate )
{
struct clk_pll14xx * pll = to_clk_pll14xx ( hw ) ;
const struct imx_pll14xx_rate_table * rate_table = pll - > rate_table ;
int i ;
2022-03-04 13:52:54 +01:00
/* Assuming rate_table is in descending order */
2019-01-22 09:31:41 +00:00
for ( i = 0 ; i < pll - > rate_count ; i + + )
if ( rate > = rate_table [ i ] . rate )
return rate_table [ i ] . rate ;
/* return minimum supported value */
2022-03-04 13:52:54 +01:00
return rate_table [ pll - > rate_count - 1 ] . rate ;
2019-01-22 09:31:41 +00:00
}
2022-03-04 13:52:56 +01:00
static long clk_pll1443x_round_rate ( struct clk_hw * hw , unsigned long rate ,
unsigned long * prate )
{
struct clk_pll14xx * pll = to_clk_pll14xx ( hw ) ;
struct imx_pll14xx_rate_table t ;
imx_pll14xx_calc_settings ( pll , rate , * prate , & t ) ;
return t . rate ;
}
2022-03-04 13:52:52 +01:00
static unsigned long clk_pll14xx_recalc_rate ( struct clk_hw * hw ,
2019-01-22 09:31:41 +00:00
unsigned long parent_rate )
{
struct clk_pll14xx * pll = to_clk_pll14xx ( hw ) ;
2022-03-04 13:52:52 +01:00
u32 mdiv , pdiv , sdiv , kdiv , pll_div_ctl0 , pll_div_ctl1 ;
2019-01-22 09:31:41 +00:00
2022-03-04 13:52:49 +01:00
pll_div_ctl0 = readl_relaxed ( pll - > base + DIV_CTL0 ) ;
2022-03-04 13:52:51 +01:00
mdiv = FIELD_GET ( MDIV_MASK , pll_div_ctl0 ) ;
pdiv = FIELD_GET ( PDIV_MASK , pll_div_ctl0 ) ;
sdiv = FIELD_GET ( SDIV_MASK , pll_div_ctl0 ) ;
2019-01-22 09:31:41 +00:00
2022-03-04 13:52:52 +01:00
if ( pll - > type = = PLL_1443X ) {
pll_div_ctl1 = readl_relaxed ( pll - > base + DIV_CTL1 ) ;
2022-12-10 15:38:35 -05:00
kdiv = ( s16 ) FIELD_GET ( KDIV_MASK , pll_div_ctl1 ) ;
2022-03-04 13:52:52 +01:00
} else {
kdiv = 0 ;
}
2019-01-22 09:31:41 +00:00
2022-03-04 13:52:52 +01:00
return pll14xx_calc_rate ( pll , mdiv , pdiv , sdiv , kdiv , parent_rate ) ;
2019-01-22 09:31:41 +00:00
}
2019-09-04 12:49:18 +03:00
static inline bool clk_pll14xx_mp_change ( const struct imx_pll14xx_rate_table * rate ,
2019-01-22 09:31:41 +00:00
u32 pll_div )
{
u32 old_mdiv , old_pdiv ;
2022-03-04 13:52:51 +01:00
old_mdiv = FIELD_GET ( MDIV_MASK , pll_div ) ;
old_pdiv = FIELD_GET ( PDIV_MASK , pll_div ) ;
2019-01-22 09:31:41 +00:00
return rate - > mdiv ! = old_mdiv | | rate - > pdiv ! = old_pdiv ;
}
/*
 * Poll GNRL_CTL until the hardware reports lock, or time out.
 * Returns 0 on lock, -ETIMEDOUT after LOCK_TIMEOUT_US microseconds.
 */
static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
	u32 val;

	return readl_poll_timeout(pll->base + GNRL_CTL, val, val & LOCK_STATUS, 0,
			LOCK_TIMEOUT_US);
}
/*
 * Program the integer (1416x) PLL to @drate.
 *
 * Only rates present in the static table are accepted (-EINVAL otherwise).
 * If mdiv/pdiv are unchanged, just the post-scaler sdiv is updated — the
 * hardware handles that without losing lock. Otherwise the full reset
 * sequence runs: bypass, assert reset, write dividers, deassert reset,
 * wait for lock, then un-bypass. The write ordering below follows the
 * hardware procedure and must not be rearranged.
 */
static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("Invalid rate %lu for pll clk %s\n", drate,
		       clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + DIV_CTL0);

	/* Glitch-free fast path: only the sdiv post-scaler needs to change */
	if (!clk_pll14xx_mp_change(rate, tmp)) {
		tmp &= ~SDIV_MASK;
		tmp |= FIELD_PREP(SDIV_MASK, rate->sdiv);
		writel_relaxed(tmp, pll->base + DIV_CTL0);

		return 0;
	}

	/* Bypass clock and set lock to pll output lock */
	tmp = readl_relaxed(pll->base + GNRL_CTL);
	tmp |= LOCK_SEL_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Enable RST */
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Enable BYPASS */
	tmp |= BYPASS_MASK;
	writel(tmp, pll->base + GNRL_CTL);

	div_val = FIELD_PREP(MDIV_MASK, rate->mdiv) | FIELD_PREP(PDIV_MASK, rate->pdiv) |
		FIELD_PREP(SDIV_MASK, rate->sdiv);
	writel_relaxed(div_val, pll->base + DIV_CTL0);

	/*
	 * According to SPEC, t3 - t2 need to be greater than
	 * 1us and 1/FREF, respectively.
	 * FREF is FIN / Prediv, the prediv is [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	return 0;
}
/*
 * Program the fractional (1443x) PLL to (the best approximation of) @drate.
 *
 * Divider settings come from imx_pll14xx_calc_settings(), so any rate is
 * accepted and the closest achievable frequency is used. When mdiv/pdiv
 * stay the same, only sdiv/kdiv are rewritten — glitch free, no reset.
 * Otherwise the full reset/bypass/relock sequence runs; the register
 * write ordering follows the hardware procedure and must be preserved.
 */
static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	struct imx_pll14xx_rate_table rate;
	u32 gnrl_ctl, div_ctl0;
	int ret;

	imx_pll14xx_calc_settings(pll, drate, prate, &rate);

	div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);

	if (!clk_pll14xx_mp_change(&rate, div_ctl0)) {
		/* only sdiv and/or kdiv changed - no need to RESET PLL */
		div_ctl0 &= ~SDIV_MASK;
		div_ctl0 |= FIELD_PREP(SDIV_MASK, rate.sdiv);
		writel_relaxed(div_ctl0, pll->base + DIV_CTL0);

		writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv),
			       pll->base + DIV_CTL1);

		return 0;
	}

	/* Enable RST */
	gnrl_ctl = readl_relaxed(pll->base + GNRL_CTL);
	gnrl_ctl &= ~RST_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	/* Enable BYPASS */
	gnrl_ctl |= BYPASS_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	/* Write all three integer dividers, then the fractional part */
	div_ctl0 = FIELD_PREP(MDIV_MASK, rate.mdiv) |
		   FIELD_PREP(PDIV_MASK, rate.pdiv) |
		   FIELD_PREP(SDIV_MASK, rate.sdiv);
	writel_relaxed(div_ctl0, pll->base + DIV_CTL0);
	writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv), pll->base + DIV_CTL1);

	/*
	 * According to SPEC, t3 - t2 need to be greater than
	 * 1us and 1/FREF, respectively.
	 * FREF is FIN / Prediv, the prediv is [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST */
	gnrl_ctl |= RST_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	/* Wait Lock*/
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	gnrl_ctl &= ~BYPASS_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	return 0;
}
/*
 * Power up the PLL: take it out of reset behind the bypass mux, wait for
 * lock, then switch the output from bypass to the PLL. Returns 0 on
 * success or the wait_lock() error on lock timeout.
 */
static int clk_pll14xx_prepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;
	int ret;

	/*
	 * RESETB = 1 from 0, PLL starts its normal
	 * operation after lock time
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	/* Already out of reset — nothing to do */
	if (val & RST_MASK)
		return 0;
	val |= BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
	val |= RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Locked: route the PLL output instead of the bypass clock */
	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	return 0;
}
static int clk_pll14xx_is_prepared ( struct clk_hw * hw )
{
struct clk_pll14xx * pll = to_clk_pll14xx ( hw ) ;
u32 val ;
val = readl_relaxed ( pll - > base + GNRL_CTL ) ;
return ( val & RST_MASK ) ? 1 : 0 ;
}
static void clk_pll14xx_unprepare ( struct clk_hw * hw )
{
struct clk_pll14xx * pll = to_clk_pll14xx ( hw ) ;
u32 val ;
/*
* Set RST to 0 , power down mode is enabled and
* every digital block is reset
*/
val = readl_relaxed ( pll - > base + GNRL_CTL ) ;
val & = ~ RST_MASK ;
writel_relaxed ( val , pll - > base + GNRL_CTL ) ;
}
/* Full op set for the integer 1416x PLL (table-driven rate selection) */
static const struct clk_ops clk_pll1416x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll14xx_recalc_rate,
	.round_rate	= clk_pll1416x_round_rate,
	.set_rate	= clk_pll1416x_set_rate,
};
/*
 * Read-only op set used for a 1416x PLL registered without a rate table:
 * the rate can be observed but not changed.
 */
static const struct clk_ops clk_pll1416x_min_ops = {
	.recalc_rate	= clk_pll14xx_recalc_rate,
};
/* Full op set for the fractional 1443x PLL (computed rate selection) */
static const struct clk_ops clk_pll1443x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll14xx_recalc_rate,
	.round_rate	= clk_pll1443x_round_rate,
	.set_rate	= clk_pll1443x_set_rate,
};
2020-04-15 11:02:46 +03:00
/*
 * Allocate and register a pll14xx clock.
 *
 * @dev:         device the clock is registered against (may be NULL)
 * @name:        clock name
 * @parent_name: name of the single parent clock
 * @base:        MMIO base of the PLL register block
 * @pll_clk:     static description (type, flags, optional rate table)
 *
 * Returns the registered clk_hw on success, or an ERR_PTR on allocation,
 * type, or registration failure. The allocation is freed on every error
 * path; on success it lives for the lifetime of the clock.
 */
struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
				const char *parent_name, void __iomem *base,
				const struct imx_pll14xx_clk *pll_clk)
{
	struct clk_pll14xx *pll;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;
	u32 val;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	/* init lives on the stack; clk_hw_register() copies what it needs */
	init.name = name;
	init.flags = pll_clk->flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	switch (pll_clk->type) {
	case PLL_1416X:
		/* Without a rate table the 1416x can only report its rate */
		if (!pll_clk->rate_table)
			init.ops = &clk_pll1416x_min_ops;
		else
			init.ops = &clk_pll1416x_ops;
		break;
	case PLL_1443X:
		init.ops = &clk_pll1443x_ops;
		break;
	default:
		pr_err("Unknown pll type for pll clk %s\n", name);
		kfree(pll);
		return ERR_PTR(-EINVAL);
	}

	pll->base = base;
	pll->hw.init = &init;
	pll->type = pll_clk->type;
	pll->rate_table = pll_clk->rate_table;
	pll->rate_count = pll_clk->rate_count;

	/* Make sure the PLL output (not the bypass clock) is selected */
	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	hw = &pll->hw;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		pr_err("failed to register pll %s %d\n", name, ret);
		kfree(pll);
		return ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(imx_dev_clk_hw_pll14xx);