2018-12-11 20:57:48 +03:00
// SPDX-License-Identifier: GPL-2.0
2013-03-20 16:00:34 +04:00
/*
* Copyright ( c ) 2013 NVIDIA CORPORATION . All rights reserved .
*/
# include <linux/clk-provider.h>
2020-11-05 22:27:45 +03:00
# include <linux/device.h>
2013-03-20 16:00:34 +04:00
# include <linux/err.h>
# include <linux/slab.h>
static u8 clk_composite_get_parent ( struct clk_hw * hw )
{
struct clk_composite * composite = to_clk_composite ( hw ) ;
const struct clk_ops * mux_ops = composite - > mux_ops ;
struct clk_hw * mux_hw = composite - > mux_hw ;
2015-02-12 16:58:30 +03:00
__clk_hw_set_clk ( mux_hw , hw ) ;
2013-03-20 16:00:34 +04:00
return mux_ops - > get_parent ( mux_hw ) ;
}
/* Select parent @index by delegating to the mux component's ops. */
static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_composite *composite = to_clk_composite(hw);

	/* Point the component hw at the composite clk before delegating. */
	__clk_hw_set_clk(composite->mux_hw, hw);

	return composite->mux_ops->set_parent(composite->mux_hw, index);
}
static unsigned long clk_composite_recalc_rate ( struct clk_hw * hw ,
unsigned long parent_rate )
{
struct clk_composite * composite = to_clk_composite ( hw ) ;
2013-04-11 22:31:36 +04:00
const struct clk_ops * rate_ops = composite - > rate_ops ;
struct clk_hw * rate_hw = composite - > rate_hw ;
2013-03-20 16:00:34 +04:00
2015-02-12 16:58:30 +03:00
__clk_hw_set_clk ( rate_hw , hw ) ;
2013-03-20 16:00:34 +04:00
2013-04-11 22:31:36 +04:00
return rate_ops - > recalc_rate ( rate_hw , parent_rate ) ;
2013-03-20 16:00:34 +04:00
}
2021-10-16 13:50:22 +03:00
static int clk_composite_determine_rate_for_parent ( struct clk_hw * rate_hw ,
struct clk_rate_request * req ,
struct clk_hw * parent_hw ,
const struct clk_ops * rate_ops )
{
long rate ;
req - > best_parent_hw = parent_hw ;
req - > best_parent_rate = clk_hw_get_rate ( parent_hw ) ;
if ( rate_ops - > determine_rate )
return rate_ops - > determine_rate ( rate_hw , req ) ;
rate = rate_ops - > round_rate ( rate_hw , req - > rate ,
& req - > best_parent_rate ) ;
if ( rate < 0 )
return rate ;
req - > rate = rate ;
return 0 ;
}
2015-07-07 21:48:08 +03:00
static int clk_composite_determine_rate ( struct clk_hw * hw ,
struct clk_rate_request * req )
2013-09-15 04:37:59 +04:00
{
struct clk_composite * composite = to_clk_composite ( hw ) ;
const struct clk_ops * rate_ops = composite - > rate_ops ;
const struct clk_ops * mux_ops = composite - > mux_ops ;
struct clk_hw * rate_hw = composite - > rate_hw ;
struct clk_hw * mux_hw = composite - > mux_hw ;
2015-07-31 03:20:57 +03:00
struct clk_hw * parent ;
2014-07-03 03:56:45 +04:00
unsigned long rate_diff ;
unsigned long best_rate_diff = ULONG_MAX ;
2021-10-16 13:50:22 +03:00
unsigned long best_rate = 0 ;
int i , ret ;
2013-09-15 04:37:59 +04:00
2021-10-16 13:50:22 +03:00
if ( rate_hw & & rate_ops & &
( rate_ops - > determine_rate | | rate_ops - > round_rate ) & &
2021-10-16 13:50:21 +03:00
mux_hw & & mux_ops & & mux_ops - > set_parent ) {
2015-07-07 21:48:08 +03:00
req - > best_parent_hw = NULL ;
2014-07-03 03:56:45 +04:00
2015-06-30 02:56:30 +03:00
if ( clk_hw_get_flags ( hw ) & CLK_SET_RATE_NO_REPARENT ) {
clk: Stop forwarding clk_rate_requests to the parent
If the clock cannot modify its rate and has CLK_SET_RATE_PARENT,
clk_mux_determine_rate_flags(), clk_core_round_rate_nolock() and a
number of drivers will forward the clk_rate_request to the parent clock.
clk_core_round_rate_nolock() will pass the pointer directly, which means
that we pass a clk_rate_request to the parent that has the rate,
min_rate and max_rate of the child, and the best_parent_rate and
best_parent_hw fields will be relative to the child as well, so will
point to our current clock and its rate. The most common case for
CLK_SET_RATE_PARENT is that the child and parent clock rates will be
equal, so the rate field isn't a worry, but the other fields are.
Similarly, if the parent clock driver ever modifies the best_parent_rate
or best_parent_hw, this will be applied to the child once the call to
clk_core_round_rate_nolock() is done. best_parent_hw is probably not
going to be a valid parent, and best_parent_rate might lead to a parent
rate change different to the one that was initially computed.
clk_mux_determine_rate_flags() and the affected drivers will copy the
request before forwarding it to the parents, so they won't be affected
by the latter issue, but the former is still going to be there and will
lead to erroneous data and context being passed to the various clock
drivers in the same sub-tree.
Let's create two new functions, clk_core_forward_rate_req() and
clk_hw_forward_rate_request() for the framework and the clock providers
that will copy a request from a child clock and update the context to
match the parent's. We also update the relevant call sites in the
framework and drivers to use that new function.
Let's also add a test to make sure we avoid regressions there.
Tested-by: Alexander Stein <alexander.stein@ew.tq-group.com> # imx8mp
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com> # exynos4210, meson g12b
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Link: https://lore.kernel.org/r/20220816112530.1837489-22-maxime@cerno.tech
Tested-by: Linux Kernel Functional Testing <lkft@linaro.org>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
2022-08-16 14:25:26 +03:00
struct clk_rate_request tmp_req ;
2021-10-16 13:50:22 +03:00
2015-07-31 03:20:57 +03:00
parent = clk_hw_get_parent ( mux_hw ) ;
2014-07-03 03:56:45 +04:00
clk: Stop forwarding clk_rate_requests to the parent
If the clock cannot modify its rate and has CLK_SET_RATE_PARENT,
clk_mux_determine_rate_flags(), clk_core_round_rate_nolock() and a
number of drivers will forward the clk_rate_request to the parent clock.
clk_core_round_rate_nolock() will pass the pointer directly, which means
that we pass a clk_rate_request to the parent that has the rate,
min_rate and max_rate of the child, and the best_parent_rate and
best_parent_hw fields will be relative to the child as well, so will
point to our current clock and its rate. The most common case for
CLK_SET_RATE_PARENT is that the child and parent clock rates will be
equal, so the rate field isn't a worry, but the other fields are.
Similarly, if the parent clock driver ever modifies the best_parent_rate
or best_parent_hw, this will be applied to the child once the call to
clk_core_round_rate_nolock() is done. best_parent_hw is probably not
going to be a valid parent, and best_parent_rate might lead to a parent
rate change different to the one that was initially computed.
clk_mux_determine_rate_flags() and the affected drivers will copy the
request before forwarding it to the parents, so they won't be affected
by the latter issue, but the former is still going to be there and will
lead to erroneous data and context being passed to the various clock
drivers in the same sub-tree.
Let's create two new functions, clk_core_forward_rate_req() and
clk_hw_forward_rate_request() for the framework and the clock providers
that will copy a request from a child clock and update the context to
match the parent's. We also update the relevant call sites in the
framework and drivers to use that new function.
Let's also add a test to make sure we avoid regressions there.
Tested-by: Alexander Stein <alexander.stein@ew.tq-group.com> # imx8mp
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com> # exynos4210, meson g12b
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Link: https://lore.kernel.org/r/20220816112530.1837489-22-maxime@cerno.tech
Tested-by: Linux Kernel Functional Testing <lkft@linaro.org>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
2022-08-16 14:25:26 +03:00
clk_hw_forward_rate_request ( hw , req , parent , & tmp_req , req - > rate ) ;
2021-10-16 13:50:22 +03:00
ret = clk_composite_determine_rate_for_parent ( rate_hw ,
& tmp_req ,
parent ,
rate_ops ) ;
if ( ret )
return ret ;
req - > rate = tmp_req . rate ;
2021-11-03 15:24:41 +03:00
req - > best_parent_hw = tmp_req . best_parent_hw ;
2021-10-16 13:50:22 +03:00
req - > best_parent_rate = tmp_req . best_parent_rate ;
2015-07-07 21:48:08 +03:00
return 0 ;
2014-07-03 03:56:45 +04:00
}
2015-06-26 02:53:23 +03:00
for ( i = 0 ; i < clk_hw_get_num_parents ( mux_hw ) ; i + + ) {
clk: Stop forwarding clk_rate_requests to the parent
If the clock cannot modify its rate and has CLK_SET_RATE_PARENT,
clk_mux_determine_rate_flags(), clk_core_round_rate_nolock() and a
number of drivers will forward the clk_rate_request to the parent clock.
clk_core_round_rate_nolock() will pass the pointer directly, which means
that we pass a clk_rate_request to the parent that has the rate,
min_rate and max_rate of the child, and the best_parent_rate and
best_parent_hw fields will be relative to the child as well, so will
point to our current clock and its rate. The most common case for
CLK_SET_RATE_PARENT is that the child and parent clock rates will be
equal, so the rate field isn't a worry, but the other fields are.
Similarly, if the parent clock driver ever modifies the best_parent_rate
or best_parent_hw, this will be applied to the child once the call to
clk_core_round_rate_nolock() is done. best_parent_hw is probably not
going to be a valid parent, and best_parent_rate might lead to a parent
rate change different to the one that was initially computed.
clk_mux_determine_rate_flags() and the affected drivers will copy the
request before forwarding it to the parents, so they won't be affected
by the latter issue, but the former is still going to be there and will
lead to erroneous data and context being passed to the various clock
drivers in the same sub-tree.
Let's create two new functions, clk_core_forward_rate_req() and
clk_hw_forward_rate_request() for the framework and the clock providers
that will copy a request from a child clock and update the context to
match the parent's. We also update the relevant call sites in the
framework and drivers to use that new function.
Let's also add a test to make sure we avoid regressions there.
Tested-by: Alexander Stein <alexander.stein@ew.tq-group.com> # imx8mp
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com> # exynos4210, meson g12b
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Link: https://lore.kernel.org/r/20220816112530.1837489-22-maxime@cerno.tech
Tested-by: Linux Kernel Functional Testing <lkft@linaro.org>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
2022-08-16 14:25:26 +03:00
struct clk_rate_request tmp_req ;
2021-10-16 13:50:22 +03:00
2015-07-31 03:20:57 +03:00
parent = clk_hw_get_parent_by_index ( mux_hw , i ) ;
2014-07-03 03:56:45 +04:00
if ( ! parent )
continue ;
clk: Stop forwarding clk_rate_requests to the parent
If the clock cannot modify its rate and has CLK_SET_RATE_PARENT,
clk_mux_determine_rate_flags(), clk_core_round_rate_nolock() and a
number of drivers will forward the clk_rate_request to the parent clock.
clk_core_round_rate_nolock() will pass the pointer directly, which means
that we pass a clk_rate_request to the parent that has the rate,
min_rate and max_rate of the child, and the best_parent_rate and
best_parent_hw fields will be relative to the child as well, so will
point to our current clock and its rate. The most common case for
CLK_SET_RATE_PARENT is that the child and parent clock rates will be
equal, so the rate field isn't a worry, but the other fields are.
Similarly, if the parent clock driver ever modifies the best_parent_rate
or best_parent_hw, this will be applied to the child once the call to
clk_core_round_rate_nolock() is done. best_parent_hw is probably not
going to be a valid parent, and best_parent_rate might lead to a parent
rate change different to the one that was initially computed.
clk_mux_determine_rate_flags() and the affected drivers will copy the
request before forwarding it to the parents, so they won't be affected
by the latter issue, but the former is still going to be there and will
lead to erroneous data and context being passed to the various clock
drivers in the same sub-tree.
Let's create two new functions, clk_core_forward_rate_req() and
clk_hw_forward_rate_request() for the framework and the clock providers
that will copy a request from a child clock and update the context to
match the parent's. We also update the relevant call sites in the
framework and drivers to use that new function.
Let's also add a test to make sure we avoid regressions there.
Tested-by: Alexander Stein <alexander.stein@ew.tq-group.com> # imx8mp
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com> # exynos4210, meson g12b
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Link: https://lore.kernel.org/r/20220816112530.1837489-22-maxime@cerno.tech
Tested-by: Linux Kernel Functional Testing <lkft@linaro.org>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
2022-08-16 14:25:26 +03:00
clk_hw_forward_rate_request ( hw , req , parent , & tmp_req , req - > rate ) ;
2021-10-16 13:50:22 +03:00
ret = clk_composite_determine_rate_for_parent ( rate_hw ,
& tmp_req ,
parent ,
rate_ops ) ;
if ( ret )
2014-07-03 03:56:45 +04:00
continue ;
2023-05-26 20:10:56 +03:00
if ( req - > rate > = tmp_req . rate )
rate_diff = req - > rate - tmp_req . rate ;
else
rate_diff = tmp_req . rate - req - > rate ;
2014-07-03 03:56:45 +04:00
2015-07-07 21:48:08 +03:00
if ( ! rate_diff | | ! req - > best_parent_hw
2014-07-03 03:56:45 +04:00
| | best_rate_diff > rate_diff ) {
2015-07-31 03:20:57 +03:00
req - > best_parent_hw = parent ;
2021-10-16 13:50:22 +03:00
req - > best_parent_rate = tmp_req . best_parent_rate ;
2014-07-03 03:56:45 +04:00
best_rate_diff = rate_diff ;
2021-10-16 13:50:22 +03:00
best_rate = tmp_req . rate ;
2014-07-03 03:56:45 +04:00
}
if ( ! rate_diff )
2015-07-07 21:48:08 +03:00
return 0 ;
2014-07-03 03:56:45 +04:00
}
2015-07-07 21:48:08 +03:00
req - > rate = best_rate ;
return 0 ;
2021-10-16 13:50:21 +03:00
} else if ( rate_hw & & rate_ops & & rate_ops - > determine_rate ) {
__clk_hw_set_clk ( rate_hw , hw ) ;
return rate_ops - > determine_rate ( rate_hw , req ) ;
2013-09-15 04:37:59 +04:00
} else if ( mux_hw & & mux_ops & & mux_ops - > determine_rate ) {
2015-02-12 16:58:30 +03:00
__clk_hw_set_clk ( mux_hw , hw ) ;
2015-07-07 21:48:08 +03:00
return mux_ops - > determine_rate ( mux_hw , req ) ;
2013-09-15 04:37:59 +04:00
} else {
pr_err ( " clk: clk_composite_determine_rate function called, but no mux or rate callback set! \n " ) ;
2015-07-09 23:39:38 +03:00
return - EINVAL ;
2013-09-15 04:37:59 +04:00
}
}
2013-03-20 16:00:34 +04:00
static long clk_composite_round_rate ( struct clk_hw * hw , unsigned long rate ,
unsigned long * prate )
{
struct clk_composite * composite = to_clk_composite ( hw ) ;
2013-04-11 22:31:36 +04:00
const struct clk_ops * rate_ops = composite - > rate_ops ;
struct clk_hw * rate_hw = composite - > rate_hw ;
2013-03-20 16:00:34 +04:00
2015-02-12 16:58:30 +03:00
__clk_hw_set_clk ( rate_hw , hw ) ;
2013-03-20 16:00:34 +04:00
2013-04-11 22:31:36 +04:00
return rate_ops - > round_rate ( rate_hw , rate , prate ) ;
2013-03-20 16:00:34 +04:00
}
static int clk_composite_set_rate ( struct clk_hw * hw , unsigned long rate ,
unsigned long parent_rate )
{
struct clk_composite * composite = to_clk_composite ( hw ) ;
2013-04-11 22:31:36 +04:00
const struct clk_ops * rate_ops = composite - > rate_ops ;
struct clk_hw * rate_hw = composite - > rate_hw ;
2013-03-20 16:00:34 +04:00
2015-02-12 16:58:30 +03:00
__clk_hw_set_clk ( rate_hw , hw ) ;
2013-03-20 16:00:34 +04:00
2013-04-11 22:31:36 +04:00
return rate_ops - > set_rate ( rate_hw , rate , parent_rate ) ;
2013-03-20 16:00:34 +04:00
}
2016-04-12 11:43:39 +03:00
/*
 * Atomically (from the framework's view) change both rate and parent.
 *
 * Ordering matters: if the rate computed against the new parent is above
 * the target, lower the divider first so the clock never overshoots while
 * the parent switch is in flight; otherwise switch parents first.
 */
static int clk_composite_set_rate_and_parent(struct clk_hw *hw,
					     unsigned long rate,
					     unsigned long parent_rate,
					     u8 index)
{
	struct clk_composite *composite = to_clk_composite(hw);
	const struct clk_ops *rate_ops = composite->rate_ops;
	const struct clk_ops *mux_ops = composite->mux_ops;
	struct clk_hw *rate_hw = composite->rate_hw;
	struct clk_hw *mux_hw = composite->mux_hw;
	unsigned long current_rate;

	__clk_hw_set_clk(rate_hw, hw);
	__clk_hw_set_clk(mux_hw, hw);

	current_rate = rate_ops->recalc_rate(rate_hw, parent_rate);
	if (current_rate > rate) {
		rate_ops->set_rate(rate_hw, rate, parent_rate);
		mux_ops->set_parent(mux_hw, index);
	} else {
		mux_ops->set_parent(mux_hw, index);
		rate_ops->set_rate(rate_hw, rate, parent_rate);
	}

	return 0;
}
2013-03-20 16:00:34 +04:00
static int clk_composite_is_enabled ( struct clk_hw * hw )
{
struct clk_composite * composite = to_clk_composite ( hw ) ;
const struct clk_ops * gate_ops = composite - > gate_ops ;
struct clk_hw * gate_hw = composite - > gate_hw ;
2015-02-12 16:58:30 +03:00
__clk_hw_set_clk ( gate_hw , hw ) ;
2013-03-20 16:00:34 +04:00
return gate_ops - > is_enabled ( gate_hw ) ;
}
static int clk_composite_enable ( struct clk_hw * hw )
{
struct clk_composite * composite = to_clk_composite ( hw ) ;
const struct clk_ops * gate_ops = composite - > gate_ops ;
struct clk_hw * gate_hw = composite - > gate_hw ;
2015-02-12 16:58:30 +03:00
__clk_hw_set_clk ( gate_hw , hw ) ;
2013-03-20 16:00:34 +04:00
return gate_ops - > enable ( gate_hw ) ;
}
static void clk_composite_disable ( struct clk_hw * hw )
{
struct clk_composite * composite = to_clk_composite ( hw ) ;
const struct clk_ops * gate_ops = composite - > gate_ops ;
struct clk_hw * gate_hw = composite - > gate_hw ;
2015-02-12 16:58:30 +03:00
__clk_hw_set_clk ( gate_hw , hw ) ;
2013-03-20 16:00:34 +04:00
gate_ops - > disable ( gate_hw ) ;
}
2020-01-03 02:10:59 +03:00
static struct clk_hw * __clk_hw_register_composite ( struct device * dev ,
const char * name , const char * const * parent_names ,
const struct clk_parent_data * pdata , int num_parents ,
2013-03-20 16:00:34 +04:00
struct clk_hw * mux_hw , const struct clk_ops * mux_ops ,
2013-04-11 22:31:36 +04:00
struct clk_hw * rate_hw , const struct clk_ops * rate_ops ,
2013-03-20 16:00:34 +04:00
struct clk_hw * gate_hw , const struct clk_ops * gate_ops ,
unsigned long flags )
{
2016-02-07 11:20:31 +03:00
struct clk_hw * hw ;
2019-11-15 19:28:55 +03:00
struct clk_init_data init = { } ;
2013-03-20 16:00:34 +04:00
struct clk_composite * composite ;
struct clk_ops * clk_composite_ops ;
2016-02-07 11:20:31 +03:00
int ret ;
2013-03-20 16:00:34 +04:00
composite = kzalloc ( sizeof ( * composite ) , GFP_KERNEL ) ;
2015-05-15 02:47:10 +03:00
if ( ! composite )
2013-03-20 16:00:34 +04:00
return ERR_PTR ( - ENOMEM ) ;
init . name = name ;
2019-04-25 20:57:37 +03:00
init . flags = flags ;
2020-01-03 02:10:59 +03:00
if ( parent_names )
init . parent_names = parent_names ;
else
init . parent_data = pdata ;
2013-03-20 16:00:34 +04:00
init . num_parents = num_parents ;
2016-02-07 11:20:31 +03:00
hw = & composite - > hw ;
2013-03-20 16:00:34 +04:00
clk_composite_ops = & composite - > ops ;
if ( mux_hw & & mux_ops ) {
2014-07-03 03:57:30 +04:00
if ( ! mux_ops - > get_parent ) {
2016-02-07 11:20:31 +03:00
hw = ERR_PTR ( - EINVAL ) ;
2013-03-20 16:00:34 +04:00
goto err ;
}
composite - > mux_hw = mux_hw ;
composite - > mux_ops = mux_ops ;
clk_composite_ops - > get_parent = clk_composite_get_parent ;
2014-07-03 03:57:30 +04:00
if ( mux_ops - > set_parent )
clk_composite_ops - > set_parent = clk_composite_set_parent ;
2013-09-15 04:37:59 +04:00
if ( mux_ops - > determine_rate )
clk_composite_ops - > determine_rate = clk_composite_determine_rate ;
2013-03-20 16:00:34 +04:00
}
2013-04-11 22:31:36 +04:00
if ( rate_hw & & rate_ops ) {
2013-04-11 22:31:37 +04:00
if ( ! rate_ops - > recalc_rate ) {
2016-02-07 11:20:31 +03:00
hw = ERR_PTR ( - EINVAL ) ;
2013-03-20 16:00:34 +04:00
goto err ;
}
2014-07-03 03:58:14 +04:00
clk_composite_ops - > recalc_rate = clk_composite_recalc_rate ;
2013-03-20 16:00:34 +04:00
2014-07-03 03:58:14 +04:00
if ( rate_ops - > determine_rate )
clk_composite_ops - > determine_rate =
clk_composite_determine_rate ;
else if ( rate_ops - > round_rate )
clk_composite_ops - > round_rate =
clk_composite_round_rate ;
/* .set_rate requires either .round_rate or .determine_rate */
if ( rate_ops - > set_rate ) {
if ( rate_ops - > determine_rate | | rate_ops - > round_rate )
clk_composite_ops - > set_rate =
clk_composite_set_rate ;
else
WARN ( 1 , " %s: missing round_rate op is required \n " ,
__func__ ) ;
2013-04-11 22:31:37 +04:00
}
2013-04-11 22:31:36 +04:00
composite - > rate_hw = rate_hw ;
composite - > rate_ops = rate_ops ;
2013-03-20 16:00:34 +04:00
}
2016-04-12 11:43:39 +03:00
if ( mux_hw & & mux_ops & & rate_hw & & rate_ops ) {
if ( mux_ops - > set_parent & & rate_ops - > set_rate )
clk_composite_ops - > set_rate_and_parent =
clk_composite_set_rate_and_parent ;
}
2013-03-20 16:00:34 +04:00
if ( gate_hw & & gate_ops ) {
if ( ! gate_ops - > is_enabled | | ! gate_ops - > enable | |
! gate_ops - > disable ) {
2016-02-07 11:20:31 +03:00
hw = ERR_PTR ( - EINVAL ) ;
2013-03-20 16:00:34 +04:00
goto err ;
}
composite - > gate_hw = gate_hw ;
composite - > gate_ops = gate_ops ;
clk_composite_ops - > is_enabled = clk_composite_is_enabled ;
clk_composite_ops - > enable = clk_composite_enable ;
clk_composite_ops - > disable = clk_composite_disable ;
}
init . ops = clk_composite_ops ;
composite - > hw . init = & init ;
2016-02-07 11:20:31 +03:00
ret = clk_hw_register ( dev , hw ) ;
if ( ret ) {
hw = ERR_PTR ( ret ) ;
2013-03-20 16:00:34 +04:00
goto err ;
2016-02-07 11:20:31 +03:00
}
2013-03-20 16:00:34 +04:00
if ( composite - > mux_hw )
2016-02-07 11:20:31 +03:00
composite - > mux_hw - > clk = hw - > clk ;
2013-03-20 16:00:34 +04:00
2013-04-11 22:31:36 +04:00
if ( composite - > rate_hw )
2016-02-07 11:20:31 +03:00
composite - > rate_hw - > clk = hw - > clk ;
2013-03-20 16:00:34 +04:00
if ( composite - > gate_hw )
2016-02-07 11:20:31 +03:00
composite - > gate_hw - > clk = hw - > clk ;
2013-03-20 16:00:34 +04:00
2016-02-07 11:20:31 +03:00
return hw ;
2013-03-20 16:00:34 +04:00
err :
kfree ( composite ) ;
2016-02-07 11:20:31 +03:00
return hw ;
}
2020-01-03 02:10:59 +03:00
/* Register a composite clock whose parents are given by name. */
struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
			const char * const *parent_names, int num_parents,
			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
			struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
			struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
			unsigned long flags)
{
	return __clk_hw_register_composite(dev, name, parent_names, NULL,
					   num_parents, mux_hw, mux_ops,
					   rate_hw, rate_ops, gate_hw,
					   gate_ops, flags);
}
EXPORT_SYMBOL_GPL(clk_hw_register_composite);
2020-01-03 02:10:59 +03:00
/* Register a composite clock whose parents are given as clk_parent_data. */
struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
			const char *name,
			const struct clk_parent_data *parent_data,
			int num_parents,
			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
			struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
			struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
			unsigned long flags)
{
	return __clk_hw_register_composite(dev, name, NULL, parent_data,
					   num_parents, mux_hw, mux_ops,
					   rate_hw, rate_ops, gate_hw,
					   gate_ops, flags);
}
2016-02-07 11:20:31 +03:00
struct clk * clk_register_composite ( struct device * dev , const char * name ,
const char * const * parent_names , int num_parents ,
struct clk_hw * mux_hw , const struct clk_ops * mux_ops ,
struct clk_hw * rate_hw , const struct clk_ops * rate_ops ,
struct clk_hw * gate_hw , const struct clk_ops * gate_ops ,
unsigned long flags )
{
struct clk_hw * hw ;
hw = clk_hw_register_composite ( dev , name , parent_names , num_parents ,
mux_hw , mux_ops , rate_hw , rate_ops , gate_hw , gate_ops ,
flags ) ;
if ( IS_ERR ( hw ) )
return ERR_CAST ( hw ) ;
return hw - > clk ;
2013-03-20 16:00:34 +04:00
}
2021-09-02 01:25:24 +03:00
EXPORT_SYMBOL_GPL ( clk_register_composite ) ;
2016-03-23 19:38:24 +03:00
2020-01-03 02:10:59 +03:00
struct clk * clk_register_composite_pdata ( struct device * dev , const char * name ,
const struct clk_parent_data * parent_data ,
int num_parents ,
struct clk_hw * mux_hw , const struct clk_ops * mux_ops ,
struct clk_hw * rate_hw , const struct clk_ops * rate_ops ,
struct clk_hw * gate_hw , const struct clk_ops * gate_ops ,
unsigned long flags )
{
struct clk_hw * hw ;
hw = clk_hw_register_composite_pdata ( dev , name , parent_data ,
num_parents , mux_hw , mux_ops , rate_hw , rate_ops ,
gate_hw , gate_ops , flags ) ;
if ( IS_ERR ( hw ) )
return ERR_CAST ( hw ) ;
return hw - > clk ;
}
2016-03-23 19:38:24 +03:00
/* Unregister a composite clk and free the clk_composite wrapper. */
void clk_unregister_composite(struct clk *clk)
{
	struct clk_composite *composite;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	/* Recover the container before the clk is torn down. */
	composite = to_clk_composite(hw);

	clk_unregister(clk);
	kfree(composite);
}
2019-11-15 19:28:56 +03:00
/* Unregister a composite clk_hw and free the clk_composite wrapper. */
void clk_hw_unregister_composite(struct clk_hw *hw)
{
	struct clk_composite *composite = to_clk_composite(hw);

	clk_hw_unregister(hw);
	kfree(composite);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_composite);
2020-11-05 22:27:45 +03:00
/* devres release callback: tear down a devm-registered composite clk. */
static void devm_clk_hw_release_composite(struct device *dev, void *res)
{
	clk_hw_unregister_composite(*(struct clk_hw **)res);
}
/*
 * Device-managed core registration: on success the composite clk is
 * unregistered automatically when @dev is unbound.
 */
static struct clk_hw *__devm_clk_hw_register_composite(struct device *dev,
			const char *name, const char * const *parent_names,
			const struct clk_parent_data *pdata, int num_parents,
			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
			struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
			struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
			unsigned long flags)
{
	struct clk_hw **ptr, *hw;

	ptr = devres_alloc(devm_clk_hw_release_composite, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	hw = __clk_hw_register_composite(dev, name, parent_names, pdata,
					 num_parents, mux_hw, mux_ops, rate_hw,
					 rate_ops, gate_hw, gate_ops, flags);

	if (!IS_ERR(hw)) {
		/* Arm the devres cleanup only after a successful register. */
		*ptr = hw;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hw;
}
/* Device-managed registration using clk_parent_data parents. */
struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev,
			const char *name,
			const struct clk_parent_data *parent_data,
			int num_parents,
			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
			struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
			struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
			unsigned long flags)
{
	return __devm_clk_hw_register_composite(dev, name, NULL, parent_data,
						num_parents, mux_hw, mux_ops,
						rate_hw, rate_ops, gate_hw,
						gate_ops, flags);
}