/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/pm_qos.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clkdev.h>

#include "clock.h"

static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clocks_lock);
static LIST_HEAD(clocks);

/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clocks_lock, flags);
	clk->count++;
	if (clk->count == 1)
		clk->ops->enable(clk->id);
	spin_unlock_irqrestore(&clocks_lock, flags);
	return 0;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clocks_lock, flags);
	BUG_ON(clk->count == 0);
	clk->count--;
	if (clk->count == 0)
		clk->ops->disable(clk->id);
	spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
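
/*
 * Usage sketch, not part of the original driver: clk_enable()/clk_disable()
 * are reference counted above, so only the first enable and the last disable
 * reach the clk_ops hardware callbacks. A hypothetical consumer might look
 * like this; the "core_clk" con_id and the function name are illustrative
 * assumptions, not names taken from this code.
 */
static int __maybe_unused example_clk_consumer(struct device *dev)
{
	struct clk *core_clk;
	int ret;

	core_clk = clk_get(dev, "core_clk");	/* hypothetical con_id */
	if (IS_ERR(core_clk))
		return PTR_ERR(core_clk);

	ret = clk_enable(core_clk);	/* count 0 -> 1: ops->enable() runs */
	if (ret) {
		clk_put(core_clk);
		return ret;
	}

	/* ... use the peripheral ... */

	clk_disable(core_clk);		/* count 1 -> 0: ops->disable() runs */
	clk_put(core_clk);
	return 0;
}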

int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	return clk->ops->reset(clk->remote_id, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->ops->get_rate(clk->id);
}
EXPORT_SYMBOL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (clk->flags & CLKFLAG_MAX) {
		ret = clk->ops->set_max_rate(clk->id, rate);
		if (ret)
			return ret;
	}
	if (clk->flags & CLKFLAG_MIN) {
		ret = clk->ops->set_min_rate(clk->id, rate);
		if (ret)
			return ret;
	}

	if (clk->flags & CLKFLAG_MAX || clk->flags & CLKFLAG_MIN)
		return ret;

	return clk->ops->set_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return clk->ops->round_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return clk->ops->set_min_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_min_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return clk->ops->set_max_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);
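
/*
 * Rate-setting sketch, illustrative only: for ordinary clocks clk_set_rate()
 * above falls through to ops->set_rate(), while clocks flagged CLKFLAG_MIN or
 * CLKFLAG_MAX have the request routed through the min/max hooks instead. A
 * caller that wants a rate the hardware actually supports can round it first.
 * The function name and the 48 MHz target are assumptions for the example.
 */
static int __maybe_unused example_pick_supported_rate(struct clk *clk)
{
	long rounded;

	rounded = clk_round_rate(clk, 48000000);
	if (rounded <= 0)
		return -EINVAL;

	return clk_set_rate(clk, rounded);
}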

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return -ENOSYS;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return ERR_PTR(-ENOSYS);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;
	return clk->ops->set_flags(clk->id, flags);
}
EXPORT_SYMBOL(clk_set_flags);

/* EBI1 is the only shared clock that several clients want to vote on as of
 * this commit. If this changes in the future, then it might be better to
 * make clk_min_rate handle the voting or make ebi1_clk_set_min_rate more
 * generic to support different clocks.
 */
static struct clk *ebi1_clk;
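
/*
 * Voting sketch, hypothetical: a client that depends on EBI1 bandwidth would
 * raise its floor with clk_set_min_rate() rather than clk_set_rate(), leaving
 * it to ebi1's clk_ops to aggregate the requests of all voters. The lookup by
 * the "ebi1_clk" con_id mirrors msm_clock_init() below; the function name and
 * the 128 MHz floor are made up for illustration.
 */
static int __maybe_unused example_vote_ebi1_floor(void)
{
	struct clk *ebi1;

	ebi1 = clk_get(NULL, "ebi1_clk");
	if (IS_ERR(ebi1))
		return PTR_ERR(ebi1);

	return clk_set_min_rate(ebi1, 128000000);
}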

void __init msm_clock_init(struct clk_lookup *clock_tbl, unsigned num_clocks)
{
	unsigned n;

	mutex_lock(&clocks_mutex);
	for (n = 0; n < num_clocks; n++) {
		clkdev_add(&clock_tbl[n]);
		list_add_tail(&clock_tbl[n].clk->list, &clocks);
	}
	mutex_unlock(&clocks_mutex);

	ebi1_clk = clk_get(NULL, "ebi1_clk");
	BUG_ON(ebi1_clk == NULL);
}
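
/*
 * Registration sketch, not from the original source: a board file hands its
 * clkdev table to msm_clock_init() during machine init. The entry and names
 * below are hypothetical (real tables live in the per-SoC devices-*.c files),
 * so the fragment is kept compiled out.
 */
#if 0
extern struct clk example_uart_clk;	/* would be defined with its clk_ops */

static struct clk_lookup example_board_clocks[] = {
	{
		.dev_id	= "msm_serial.0",	/* hypothetical device */
		.con_id	= "uart_clk",		/* hypothetical connection id */
		.clk	= &example_uart_clk,
	},
};

static void __init example_board_init(void)
{
	msm_clock_init(example_board_clocks, ARRAY_SIZE(example_board_clocks));
}
#endif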

/* The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that belong to us (CLKFLAG_AUTO_OFF) but have
 * not been explicitly enabled by a clk_enable() call.
 */
static int __init clock_late_init(void)
{
	unsigned long flags;
	struct clk *clk;
	unsigned count = 0;

	clock_debug_init();
	mutex_lock(&clocks_mutex);
	list_for_each_entry(clk, &clocks, list) {
		clock_debug_add(clk);
		if (clk->flags & CLKFLAG_AUTO_OFF) {
			spin_lock_irqsave(&clocks_lock, flags);
			if (!clk->count) {
				count++;
				clk->ops->auto_off(clk->id);
			}
			spin_unlock_irqrestore(&clocks_lock, flags);
		}
	}
	mutex_unlock(&clocks_mutex);
	pr_info("clock_late_init() disabled %d unused clocks\n", count);
	return 0;
}
late_initcall(clock_late_init);