// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 NXP
*
 * Dong Aisheng <aisheng.dong@nxp.com>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/slab.h>

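/*
 * Resolve @num_clks clocks of device node @np into @clks, picking up the
 * optional "clock-names" strings as ids. On any failure, all clocks
 * obtained so far are put again and a negative error code is returned.
 */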
static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
					struct clk_bulk_data *clks)
{
	int ret;
	int i;

	for (i = 0; i < num_clks; i++) {
		clks[i].id = NULL;
		clks[i].clk = NULL;
	}

	for (i = 0; i < num_clks; i++) {
		of_property_read_string_index(np, "clock-names", i, &clks[i].id);
		clks[i].clk = of_clk_get(np, i);
		if (IS_ERR(clks[i].clk)) {
			ret = PTR_ERR(clks[i].clk);
			pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
			       np, i, ret);
			clks[i].clk = NULL;
			goto err;
		}
	}

	return 0;

err:
	clk_bulk_put(i, clks);

	return ret;
}

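/*
 * Allocate a clk_bulk_data table sized for every clock referenced by
 * @np and obtain them all. Returns the number of clocks on success,
 * 0 if the node references no clocks, or a negative error code.
 */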
static int __must_check of_clk_bulk_get_all(struct device_node *np,
					    struct clk_bulk_data **clks)
{
	struct clk_bulk_data *clk_bulk;
	int num_clks;
	int ret;

	num_clks = of_clk_get_parent_count(np);
	if (!num_clks)
		return 0;

	clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL);
	if (!clk_bulk)
		return -ENOMEM;

	ret = of_clk_bulk_get(np, num_clks, clk_bulk);
	if (ret) {
		kfree(clk_bulk);
		return ret;
	}

	*clks = clk_bulk;

	return num_clks;
}

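/**
 * clk_bulk_put - release a bulk of clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumers to release
 *
 * Puts every clock in @clks and clears the clk pointers, so the table
 * can safely be passed to clk_bulk_put() again.
 */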
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
{
	while (--num_clks >= 0) {
		clk_put(clks[num_clks].clk);
		clks[num_clks].clk = NULL;
	}
}
EXPORT_SYMBOL_GPL(clk_bulk_put);

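/*
 * Common worker for clk_bulk_get() and clk_bulk_get_optional(): when
 * @optional is true, clocks that are not present (-ENOENT) are skipped
 * instead of failing the whole request.
 */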
static int __clk_bulk_get(struct device *dev, int num_clks,
			  struct clk_bulk_data *clks, bool optional)
{
	int ret;
	int i;

	for (i = 0; i < num_clks; i++)
		clks[i].clk = NULL;

	for (i = 0; i < num_clks; i++) {
		clks[i].clk = clk_get(dev, clks[i].id);
		if (IS_ERR(clks[i].clk)) {
			ret = PTR_ERR(clks[i].clk);
			clks[i].clk = NULL;

			if (ret == -ENOENT && optional)
				continue;

			dev_err_probe(dev, ret,
				      "Failed to get clk '%s'\n",
				      clks[i].id);
			goto err;
		}
	}

	return 0;

err:
	clk_bulk_put(i, clks);

	return ret;
}

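/**
 * clk_bulk_get - lookup and obtain references to a bulk of clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumers; each entry's id must be
 *        set to the clock name to look up
 *
 * Returns 0 on success, or a negative error code if any clock cannot be
 * obtained, in which case every clock acquired so far is put again.
 */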
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks)
{
	return __clk_bulk_get(dev, num_clks, clks, false);
}
EXPORT_SYMBOL(clk_bulk_get);

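/**
 * clk_bulk_get_optional - lookup and obtain references to an optional
 *			   bulk of clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumers
 *
 * Behaves like clk_bulk_get() except that clocks which are not present
 * (-ENOENT) are skipped: their clk pointer is left NULL and no error is
 * returned for them.
 */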
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks)
{
	return __clk_bulk_get(dev, num_clks, clks, true);
}
EXPORT_SYMBOL_GPL(clk_bulk_get_optional);

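/**
 * clk_bulk_put_all - release all clocks obtained via clk_bulk_get_all()
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumers to release
 *
 * Puts every clock in the table and frees the table itself, which was
 * allocated by clk_bulk_get_all().
 */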
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks)
{
	if (IS_ERR_OR_NULL(clks))
		return;

	clk_bulk_put(num_clks, clks);

	kfree(clks);
}
EXPORT_SYMBOL(clk_bulk_put_all);

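/**
 * clk_bulk_get_all - lookup and obtain all clocks of the consumer device
 * @dev: device for clock "consumer"
 * @clks: pointer which receives a newly allocated clk_bulk_data table
 *
 * Grabs every clock referenced by @dev's device tree node. Returns the
 * number of clocks obtained (0 if the device has no OF node or no
 * clocks), or a negative error code on failure.
 */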
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks)
{
	struct device_node *np = dev_of_node(dev);

	if (!np)
		return 0;

	return of_clk_bulk_get_all(np, clks);
}
EXPORT_SYMBOL(clk_bulk_get_all);

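/*
 * A minimal usage sketch (the "foo_priv" structure and its fields are
 * hypothetical, not part of this API): a driver probe routine can grab,
 * prepare and enable every clock listed in its DT node in one go:
 *
 *	struct foo_priv *priv = dev_get_drvdata(dev);
 *	int ret;
 *
 *	ret = clk_bulk_get_all(dev, &priv->clks);
 *	if (ret < 0)
 *		return ret;
 *	priv->num_clks = ret;
 *
 *	ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
 *	if (ret) {
 *		clk_bulk_put_all(priv->num_clks, priv->clks);
 *		return ret;
 *	}
 */
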
#ifdef CONFIG_HAVE_CLK_PREPARE

/**
 * clk_bulk_unprepare - undo preparation of a set of clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being unprepared
 *
 * clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
 */
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
{
	while (--num_clks >= 0)
		clk_unprepare(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_unprepare);

/**
 * clk_bulk_prepare - prepare a set of clocks
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being prepared
 *
 * clk_bulk_prepare may sleep, which differentiates it from clk_bulk_enable.
 * Returns 0 on success, or a negative error code otherwise; on failure,
 * every clock prepared so far is unprepared again.
 */
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks)
{
	int ret;
	int i;

	for (i = 0; i < num_clks; i++) {
		ret = clk_prepare(clks[i].clk);
		if (ret) {
			pr_err("Failed to prepare clk '%s': %d\n",
			       clks[i].id, ret);
			goto err;
		}
	}

	return 0;

err:
	clk_bulk_unprepare(i, clks);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_bulk_prepare);

#endif /* CONFIG_HAVE_CLK_PREPARE */

/**
 * clk_bulk_disable - gate a set of clocks
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being gated
 *
 * clk_bulk_disable must not sleep, which differentiates it from
 * clk_bulk_unprepare. clk_bulk_disable must be called before
 * clk_bulk_unprepare.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
{
	while (--num_clks >= 0)
		clk_disable(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_disable);

/**
 * clk_bulk_enable - ungate a set of clocks
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being ungated
 *
 * clk_bulk_enable must not sleep, which differentiates it from
 * clk_bulk_prepare. Returns 0 on success, or a negative error code
 * otherwise; on failure, every clock enabled so far is disabled again.
 */
int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
{
	int ret;
	int i;

	for (i = 0; i < num_clks; i++) {
		ret = clk_enable(clks[i].clk);
		if (ret) {
			pr_err("Failed to enable clk '%s': %d\n",
			       clks[i].id, ret);
			goto err;
		}
	}

	return 0;

err:
	clk_bulk_disable(i, clks);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_bulk_enable);