2019-06-04 10:11:33 +02:00
// SPDX-License-Identifier: GPL-2.0-only
2014-04-16 09:24:44 +02:00
/*
* Copyright ( C ) 2014 NVIDIA CORPORATION . All rights reserved .
*/
# include <linux/clk.h>
2018-04-13 14:33:49 +03:00
# include <linux/delay.h>
2019-02-15 16:28:19 +01:00
# include <linux/dma-mapping.h>
2014-04-16 09:24:44 +02:00
# include <linux/interrupt.h>
# include <linux/kernel.h>
# include <linux/module.h>
# include <linux/of.h>
2018-12-12 23:38:54 +03:00
# include <linux/of_device.h>
2014-04-16 09:24:44 +02:00
# include <linux/platform_device.h>
# include <linux/slab.h>
2015-03-12 15:48:02 +01:00
# include <linux/sort.h>
# include <soc/tegra/fuse.h>
2014-04-16 09:24:44 +02:00
# include "mc.h"
/* Match table binding each supported SoC compatible to its MC description. */
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	{ .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
	{ .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
	{ .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_132_SOC
	{ .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_210_SOC
	{ .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
2019-04-11 10:48:25 +02:00
static int tegra_mc_block_dma_common ( struct tegra_mc * mc ,
2018-04-13 14:33:49 +03:00
const struct tegra_mc_reset * rst )
{
unsigned long flags ;
u32 value ;
spin_lock_irqsave ( & mc - > lock , flags ) ;
value = mc_readl ( mc , rst - > control ) | BIT ( rst - > bit ) ;
mc_writel ( mc , value , rst - > control ) ;
spin_unlock_irqrestore ( & mc - > lock , flags ) ;
return 0 ;
}
2019-04-11 10:48:25 +02:00
static bool tegra_mc_dma_idling_common ( struct tegra_mc * mc ,
2018-04-13 14:33:49 +03:00
const struct tegra_mc_reset * rst )
{
return ( mc_readl ( mc , rst - > status ) & BIT ( rst - > bit ) ) ! = 0 ;
}
2019-04-11 10:48:25 +02:00
static int tegra_mc_unblock_dma_common ( struct tegra_mc * mc ,
2018-04-13 14:33:49 +03:00
const struct tegra_mc_reset * rst )
{
unsigned long flags ;
u32 value ;
spin_lock_irqsave ( & mc - > lock , flags ) ;
value = mc_readl ( mc , rst - > control ) & ~ BIT ( rst - > bit ) ;
mc_writel ( mc , value , rst - > control ) ;
spin_unlock_irqrestore ( & mc - > lock , flags ) ;
return 0 ;
}
2019-04-11 10:48:25 +02:00
static int tegra_mc_reset_status_common ( struct tegra_mc * mc ,
2018-04-13 14:33:49 +03:00
const struct tegra_mc_reset * rst )
{
return ( mc_readl ( mc , rst - > control ) & BIT ( rst - > bit ) ) ! = 0 ;
}
2019-04-11 10:48:25 +02:00
/* Default hot-reset ops shared by SoC generations with the common layout. */
const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
	.block_dma = tegra_mc_block_dma_common,
	.dma_idling = tegra_mc_dma_idling_common,
	.unblock_dma = tegra_mc_unblock_dma_common,
	.reset_status = tegra_mc_reset_status_common,
};
/* Recover the owning tegra_mc from the embedded reset controller. */
static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
{
	return container_of(rcdev, struct tegra_mc, reset);
}
static const struct tegra_mc_reset * tegra_mc_reset_find ( struct tegra_mc * mc ,
unsigned long id )
{
unsigned int i ;
for ( i = 0 ; i < mc - > soc - > num_resets ; i + + )
if ( mc - > soc - > resets [ i ] . id = = id )
return & mc - > soc - > resets [ i ] ;
return NULL ;
}
static int tegra_mc_hotreset_assert ( struct reset_controller_dev * rcdev ,
unsigned long id )
{
struct tegra_mc * mc = reset_to_mc ( rcdev ) ;
const struct tegra_mc_reset_ops * rst_ops ;
const struct tegra_mc_reset * rst ;
int retries = 500 ;
int err ;
rst = tegra_mc_reset_find ( mc , id ) ;
if ( ! rst )
return - ENODEV ;
rst_ops = mc - > soc - > reset_ops ;
if ( ! rst_ops )
return - ENODEV ;
if ( rst_ops - > block_dma ) {
/* block clients DMA requests */
err = rst_ops - > block_dma ( mc , rst ) ;
if ( err ) {
2018-12-12 23:38:59 +03:00
dev_err ( mc - > dev , " failed to block %s DMA: %d \n " ,
2018-04-13 14:33:49 +03:00
rst - > name , err ) ;
return err ;
}
}
if ( rst_ops - > dma_idling ) {
/* wait for completion of the outstanding DMA requests */
while ( ! rst_ops - > dma_idling ( mc , rst ) ) {
if ( ! retries - - ) {
2018-12-12 23:38:59 +03:00
dev_err ( mc - > dev , " failed to flush %s DMA \n " ,
2018-04-13 14:33:49 +03:00
rst - > name ) ;
return - EBUSY ;
}
usleep_range ( 10 , 100 ) ;
}
}
if ( rst_ops - > hotreset_assert ) {
/* clear clients DMA requests sitting before arbitration */
err = rst_ops - > hotreset_assert ( mc , rst ) ;
if ( err ) {
2018-12-12 23:38:59 +03:00
dev_err ( mc - > dev , " failed to hot reset %s: %d \n " ,
2018-04-13 14:33:49 +03:00
rst - > name , err ) ;
return err ;
}
}
return 0 ;
}
static int tegra_mc_hotreset_deassert ( struct reset_controller_dev * rcdev ,
unsigned long id )
{
struct tegra_mc * mc = reset_to_mc ( rcdev ) ;
const struct tegra_mc_reset_ops * rst_ops ;
const struct tegra_mc_reset * rst ;
int err ;
rst = tegra_mc_reset_find ( mc , id ) ;
if ( ! rst )
return - ENODEV ;
rst_ops = mc - > soc - > reset_ops ;
if ( ! rst_ops )
return - ENODEV ;
if ( rst_ops - > hotreset_deassert ) {
/* take out client from hot reset */
err = rst_ops - > hotreset_deassert ( mc , rst ) ;
if ( err ) {
2018-12-12 23:38:59 +03:00
dev_err ( mc - > dev , " failed to deassert hot reset %s: %d \n " ,
2018-04-13 14:33:49 +03:00
rst - > name , err ) ;
return err ;
}
}
if ( rst_ops - > unblock_dma ) {
/* allow new DMA requests to proceed to arbitration */
err = rst_ops - > unblock_dma ( mc , rst ) ;
if ( err ) {
2018-12-12 23:38:59 +03:00
dev_err ( mc - > dev , " failed to unblock %s DMA : %d \n " ,
2018-04-13 14:33:49 +03:00
rst - > name , err ) ;
return err ;
}
}
return 0 ;
}
static int tegra_mc_hotreset_status ( struct reset_controller_dev * rcdev ,
unsigned long id )
{
struct tegra_mc * mc = reset_to_mc ( rcdev ) ;
const struct tegra_mc_reset_ops * rst_ops ;
const struct tegra_mc_reset * rst ;
rst = tegra_mc_reset_find ( mc , id ) ;
if ( ! rst )
return - ENODEV ;
rst_ops = mc - > soc - > reset_ops ;
if ( ! rst_ops )
return - ENODEV ;
return rst_ops - > reset_status ( mc , rst ) ;
}
/* reset_controller_dev callbacks implementing MC module hot reset. */
static const struct reset_control_ops tegra_mc_reset_ops = {
	.assert = tegra_mc_hotreset_assert,
	.deassert = tegra_mc_hotreset_deassert,
	.status = tegra_mc_hotreset_status,
};
static int tegra_mc_reset_setup ( struct tegra_mc * mc )
{
int err ;
mc - > reset . ops = & tegra_mc_reset_ops ;
mc - > reset . owner = THIS_MODULE ;
mc - > reset . of_node = mc - > dev - > of_node ;
mc - > reset . of_reset_n_cells = 1 ;
mc - > reset . nr_resets = mc - > soc - > num_resets ;
err = reset_controller_register ( & mc - > reset ) ;
if ( err < 0 )
return err ;
return 0 ;
}
2014-04-16 09:24:44 +02:00
static int tegra_mc_setup_latency_allowance ( struct tegra_mc * mc )
{
unsigned long long tick ;
unsigned int i ;
u32 value ;
/* compute the number of MC clock cycles per tick */
2019-04-12 01:12:48 +03:00
tick = ( unsigned long long ) mc - > tick * clk_get_rate ( mc - > clk ) ;
2014-04-16 09:24:44 +02:00
do_div ( tick , NSEC_PER_SEC ) ;
2019-04-12 01:12:49 +03:00
value = mc_readl ( mc , MC_EMEM_ARB_CFG ) ;
2014-04-16 09:24:44 +02:00
value & = ~ MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK ;
value | = MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE ( tick ) ;
2019-04-12 01:12:49 +03:00
mc_writel ( mc , value , MC_EMEM_ARB_CFG ) ;
2014-04-16 09:24:44 +02:00
/* write latency allowance defaults */
for ( i = 0 ; i < mc - > soc - > num_clients ; i + + ) {
const struct tegra_mc_la * la = & mc - > soc - > clients [ i ] . la ;
u32 value ;
2019-04-12 01:12:49 +03:00
value = mc_readl ( mc , la - > reg ) ;
2014-04-16 09:24:44 +02:00
value & = ~ ( la - > mask < < la - > shift ) ;
value | = ( la - > def & la - > mask ) < < la - > shift ;
2019-04-12 01:12:49 +03:00
mc_writel ( mc , value , la - > reg ) ;
2014-04-16 09:24:44 +02:00
}
2019-04-12 01:12:47 +03:00
/* latch new values */
2019-04-12 01:12:49 +03:00
mc_writel ( mc , MC_TIMING_UPDATE , MC_TIMING_CONTROL ) ;
2019-04-12 01:12:47 +03:00
2014-04-16 09:24:44 +02:00
return 0 ;
}
2019-08-12 00:00:40 +03:00
int tegra_mc_write_emem_configuration ( struct tegra_mc * mc , unsigned long rate )
2015-03-12 15:48:02 +01:00
{
unsigned int i ;
struct tegra_mc_timing * timing = NULL ;
for ( i = 0 ; i < mc - > num_timings ; i + + ) {
if ( mc - > timings [ i ] . rate = = rate ) {
timing = & mc - > timings [ i ] ;
break ;
}
}
if ( ! timing ) {
dev_err ( mc - > dev , " no memory timing registered for rate %lu \n " ,
rate ) ;
2019-08-12 00:00:40 +03:00
return - EINVAL ;
2015-03-12 15:48:02 +01:00
}
for ( i = 0 ; i < mc - > soc - > num_emem_regs ; + + i )
mc_writel ( mc , timing - > emem_data [ i ] , mc - > soc - > emem_regs [ i ] ) ;
2019-08-12 00:00:40 +03:00
return 0 ;
2015-03-12 15:48:02 +01:00
}
/*
 * Number of attached external memory devices. The NUMDEV field is
 * zero-based, hence the final increment.
 */
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
{
	u8 count = mc_readl(mc, MC_EMEM_ADR_CFG);

	return (count & MC_EMEM_ADR_CFG_EMEM_NUMDEV) + 1;
}
static int load_one_timing ( struct tegra_mc * mc ,
struct tegra_mc_timing * timing ,
struct device_node * node )
{
int err ;
u32 tmp ;
err = of_property_read_u32 ( node , " clock-frequency " , & tmp ) ;
if ( err ) {
dev_err ( mc - > dev ,
2018-08-27 19:57:23 -05:00
" timing %pOFn: failed to read rate \n " , node ) ;
2015-03-12 15:48:02 +01:00
return err ;
}
timing - > rate = tmp ;
timing - > emem_data = devm_kcalloc ( mc - > dev , mc - > soc - > num_emem_regs ,
sizeof ( u32 ) , GFP_KERNEL ) ;
if ( ! timing - > emem_data )
return - ENOMEM ;
err = of_property_read_u32_array ( node , " nvidia,emem-configuration " ,
timing - > emem_data ,
mc - > soc - > num_emem_regs ) ;
if ( err ) {
dev_err ( mc - > dev ,
2018-08-27 19:57:23 -05:00
" timing %pOFn: failed to read EMEM configuration \n " ,
node ) ;
2015-03-12 15:48:02 +01:00
return err ;
}
return 0 ;
}
static int load_timings ( struct tegra_mc * mc , struct device_node * node )
{
struct device_node * child ;
struct tegra_mc_timing * timing ;
int child_count = of_get_child_count ( node ) ;
int i = 0 , err ;
mc - > timings = devm_kcalloc ( mc - > dev , child_count , sizeof ( * timing ) ,
GFP_KERNEL ) ;
if ( ! mc - > timings )
return - ENOMEM ;
mc - > num_timings = child_count ;
for_each_child_of_node ( node , child ) {
timing = & mc - > timings [ i + + ] ;
err = load_one_timing ( mc , timing , child ) ;
2016-01-25 22:53:07 +05:30
if ( err ) {
of_node_put ( child ) ;
2015-03-12 15:48:02 +01:00
return err ;
2016-01-25 22:53:07 +05:30
}
2015-03-12 15:48:02 +01:00
}
return 0 ;
}
static int tegra_mc_setup_timings ( struct tegra_mc * mc )
{
struct device_node * node ;
u32 ram_code , node_ram_code ;
int err ;
ram_code = tegra_read_ram_code ( ) ;
mc - > num_timings = 0 ;
for_each_child_of_node ( mc - > dev - > of_node , node ) {
err = of_property_read_u32 ( node , " nvidia,ram-code " ,
& node_ram_code ) ;
2015-10-09 19:47:40 +02:00
if ( err | | ( node_ram_code ! = ram_code ) )
2015-03-12 15:48:02 +01:00
continue ;
err = load_timings ( mc , node ) ;
2016-01-25 22:53:07 +05:30
of_node_put ( node ) ;
2015-03-12 15:48:02 +01:00
if ( err )
return err ;
break ;
}
if ( mc - > num_timings = = 0 )
dev_warn ( mc - > dev ,
" no memory timings for RAM code %u registered \n " ,
ram_code ) ;
return 0 ;
}
2014-04-16 09:24:44 +02:00
/* Human-readable names for MC_INTSTATUS bits; unnamed bits stay NULL. */
static const char * const status_names[32] = {
	[1] = "External interrupt",
	[6] = "EMEM address decode error",
	[7] = "GART page fault",
	[8] = "Security violation",
	[9] = "EMEM arbitration error",
	[10] = "Page fault",
	[11] = "Invalid APB ASID update",
	[12] = "VPR violation",
	[13] = "Secure carveout violation",
	[16] = "MTS carveout violation",
};
/* Human-readable names for the MC_ERR_STATUS error-type field. */
static const char * const error_names[8] = {
	[2] = "EMEM decode error",
	[3] = "TrustZone violation",
	[4] = "Carveout violation",
	[6] = "SMMU translation error",
};
static irqreturn_t tegra_mc_irq ( int irq , void * data )
{
struct tegra_mc * mc = data ;
2018-04-09 22:28:29 +03:00
unsigned long status ;
2014-04-16 09:24:44 +02:00
unsigned int bit ;
/* mask all interrupts to avoid flooding */
2018-04-09 22:28:29 +03:00
status = mc_readl ( mc , MC_INTSTATUS ) & mc - > soc - > intmask ;
2018-04-09 22:28:27 +03:00
if ( ! status )
return IRQ_NONE ;
2014-04-16 09:24:44 +02:00
for_each_set_bit ( bit , & status , 32 ) {
const char * error = status_names [ bit ] ? : " unknown " ;
const char * client = " unknown " , * desc ;
const char * direction , * secure ;
phys_addr_t addr = 0 ;
unsigned int i ;
char perm [ 7 ] ;
u8 id , type ;
u32 value ;
value = mc_readl ( mc , MC_ERR_STATUS ) ;
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if ( mc - > soc - > num_address_bits > 32 ) {
addr = ( ( value > > MC_ERR_STATUS_ADR_HI_SHIFT ) &
MC_ERR_STATUS_ADR_HI_MASK ) ;
addr < < = 32 ;
}
# endif
if ( value & MC_ERR_STATUS_RW )
direction = " write " ;
else
direction = " read " ;
if ( value & MC_ERR_STATUS_SECURITY )
secure = " secure " ;
else
secure = " " ;
2015-06-04 19:33:48 +00:00
id = value & mc - > soc - > client_id_mask ;
2014-04-16 09:24:44 +02:00
for ( i = 0 ; i < mc - > soc - > num_clients ; i + + ) {
if ( mc - > soc - > clients [ i ] . id = = id ) {
client = mc - > soc - > clients [ i ] . name ;
break ;
}
}
type = ( value & MC_ERR_STATUS_TYPE_MASK ) > >
MC_ERR_STATUS_TYPE_SHIFT ;
desc = error_names [ type ] ;
switch ( value & MC_ERR_STATUS_TYPE_MASK ) {
case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE :
perm [ 0 ] = ' ' ;
perm [ 1 ] = ' [ ' ;
if ( value & MC_ERR_STATUS_READABLE )
perm [ 2 ] = ' R ' ;
else
perm [ 2 ] = ' - ' ;
if ( value & MC_ERR_STATUS_WRITABLE )
perm [ 3 ] = ' W ' ;
else
perm [ 3 ] = ' - ' ;
if ( value & MC_ERR_STATUS_NONSECURE )
perm [ 4 ] = ' - ' ;
else
perm [ 4 ] = ' S ' ;
perm [ 5 ] = ' ] ' ;
perm [ 6 ] = ' \0 ' ;
break ;
default :
perm [ 0 ] = ' \0 ' ;
break ;
}
value = mc_readl ( mc , MC_ERR_ADR ) ;
addr | = value ;
dev_err_ratelimited ( mc - > dev , " %s: %s%s @%pa: %s (%s%s) \n " ,
client , secure , direction , & addr , error ,
desc , perm ) ;
}
/* clear interrupts */
mc_writel ( mc , status , MC_INTSTATUS ) ;
return IRQ_HANDLED ;
}
2018-04-09 22:28:31 +03:00
/*
 * Interrupt handler for the Tegra20 MC, whose error reporting uses
 * per-interrupt status/address register pairs instead of the unified
 * MC_ERR_STATUS layout.
 */
static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data)
{
	struct tegra_mc *mc = data;
	unsigned long status;
	unsigned int bit;

	/* mask all interrupts to avoid flooding */
	status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
	if (!status)
		return IRQ_NONE;

	for_each_set_bit(bit, &status, 32) {
		const char *direction = "read", *secure = "";
		const char *error = status_names[bit];
		const char *client, *desc;
		phys_addr_t addr;
		u32 value, reg;
		u8 id, type;

		switch (BIT(bit)) {
		case MC_INT_DECERR_EMEM:
			reg = MC_DECERR_EMEM_OTHERS_STATUS;
			value = mc_readl(mc, reg);

			id = value & mc->soc->client_id_mask;
			desc = error_names[2];

			if (value & BIT(31))
				direction = "write";
			break;

		case MC_INT_INVALID_GART_PAGE:
			reg = MC_GART_ERROR_REQ;
			value = mc_readl(mc, reg);

			/* client ID lives above the R/W bit in this register */
			id = (value >> 1) & mc->soc->client_id_mask;
			desc = error_names[2];

			if (value & BIT(0))
				direction = "write";
			break;

		case MC_INT_SECURITY_VIOLATION:
			reg = MC_SECURITY_VIOLATION_STATUS;
			value = mc_readl(mc, reg);

			id = value & mc->soc->client_id_mask;
			type = (value & BIT(30)) ? 4 : 3;
			desc = error_names[type];
			secure = "secure ";

			if (value & BIT(31))
				direction = "write";
			break;

		default:
			continue;
		}

		client = mc->soc->clients[id].name;
		/* the fault address register follows the status register */
		addr = mc_readl(mc, reg + sizeof(u32));

		dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s)\n",
				    client, secure, direction, &addr, error,
				    desc);
	}

	/* clear interrupts */
	mc_writel(mc, status, MC_INTSTATUS);

	return IRQ_HANDLED;
}
2014-04-16 09:24:44 +02:00
static int tegra_mc_probe ( struct platform_device * pdev )
{
struct resource * res ;
struct tegra_mc * mc ;
2018-04-09 22:28:31 +03:00
void * isr ;
2019-02-15 16:28:19 +01:00
u64 mask ;
2014-04-16 09:24:44 +02:00
int err ;
mc = devm_kzalloc ( & pdev - > dev , sizeof ( * mc ) , GFP_KERNEL ) ;
if ( ! mc )
return - ENOMEM ;
platform_set_drvdata ( pdev , mc ) ;
2018-04-13 14:33:49 +03:00
spin_lock_init ( & mc - > lock ) ;
2018-12-12 23:38:54 +03:00
mc - > soc = of_device_get_match_data ( & pdev - > dev ) ;
2014-04-16 09:24:44 +02:00
mc - > dev = & pdev - > dev ;
2019-02-15 16:28:19 +01:00
mask = DMA_BIT_MASK ( mc - > soc - > num_address_bits ) ;
err = dma_coerce_mask_and_coherent ( & pdev - > dev , mask ) ;
if ( err < 0 ) {
dev_err ( & pdev - > dev , " failed to set DMA mask: %d \n " , err ) ;
return err ;
}
2014-04-16 09:24:44 +02:00
/* length of MC tick in nanoseconds */
mc - > tick = 30 ;
res = platform_get_resource ( pdev , IORESOURCE_MEM , 0 ) ;
mc - > regs = devm_ioremap_resource ( & pdev - > dev , res ) ;
if ( IS_ERR ( mc - > regs ) )
return PTR_ERR ( mc - > regs ) ;
2018-12-12 23:38:52 +03:00
mc - > clk = devm_clk_get ( & pdev - > dev , " mc " ) ;
if ( IS_ERR ( mc - > clk ) ) {
dev_err ( & pdev - > dev , " failed to get MC clock: %ld \n " ,
PTR_ERR ( mc - > clk ) ) ;
return PTR_ERR ( mc - > clk ) ;
}
2018-04-09 22:28:31 +03:00
# ifdef CONFIG_ARCH_TEGRA_2x_SOC
if ( mc - > soc = = & tegra20_mc_soc ) {
isr = tegra20_mc_irq ;
} else
# endif
{
2019-08-12 00:00:41 +03:00
/* ensure that debug features are disabled */
mc_writel ( mc , 0x00000000 , MC_TIMING_CONTROL_DBG ) ;
2018-04-09 22:28:31 +03:00
err = tegra_mc_setup_latency_allowance ( mc ) ;
if ( err < 0 ) {
2018-12-12 23:38:59 +03:00
dev_err ( & pdev - > dev ,
" failed to setup latency allowance: %d \n " ,
2018-04-09 22:28:31 +03:00
err ) ;
return err ;
}
isr = tegra_mc_irq ;
2014-04-16 09:24:44 +02:00
2018-12-12 23:38:51 +03:00
err = tegra_mc_setup_timings ( mc ) ;
if ( err < 0 ) {
dev_err ( & pdev - > dev , " failed to setup timings: %d \n " ,
err ) ;
return err ;
}
2015-03-12 15:48:02 +01:00
}
2014-04-16 09:24:44 +02:00
mc - > irq = platform_get_irq ( pdev , 0 ) ;
if ( mc - > irq < 0 ) {
dev_err ( & pdev - > dev , " interrupt not specified \n " ) ;
return mc - > irq ;
}
2018-12-12 23:38:59 +03:00
WARN ( ! mc - > soc - > client_id_mask , " missing client ID mask for this SoC \n " ) ;
2015-06-04 19:33:48 +00:00
2018-04-09 22:28:29 +03:00
mc_writel ( mc , mc - > soc - > intmask , MC_INTMASK ) ;
2014-04-16 09:24:44 +02:00
2018-12-12 23:38:58 +03:00
err = devm_request_irq ( & pdev - > dev , mc - > irq , isr , 0 ,
2018-04-09 22:28:28 +03:00
dev_name ( & pdev - > dev ) , mc ) ;
if ( err < 0 ) {
dev_err ( & pdev - > dev , " failed to request IRQ#%u: %d \n " , mc - > irq ,
err ) ;
return err ;
}
2018-05-26 17:20:35 +03:00
err = tegra_mc_reset_setup ( mc ) ;
if ( err < 0 )
dev_err ( & pdev - > dev , " failed to register reset controller: %d \n " ,
err ) ;
2018-12-12 23:38:57 +03:00
if ( IS_ENABLED ( CONFIG_TEGRA_IOMMU_SMMU ) & & mc - > soc - > smmu ) {
2018-05-08 19:55:30 +03:00
mc - > smmu = tegra_smmu_probe ( & pdev - > dev , mc - > soc - > smmu , mc ) ;
2018-12-12 23:38:57 +03:00
if ( IS_ERR ( mc - > smmu ) ) {
2018-05-08 19:55:30 +03:00
dev_err ( & pdev - > dev , " failed to probe SMMU: %ld \n " ,
PTR_ERR ( mc - > smmu ) ) ;
2018-12-12 23:38:57 +03:00
mc - > smmu = NULL ;
}
2018-05-08 19:55:30 +03:00
}
2018-12-12 23:38:56 +03:00
if ( IS_ENABLED ( CONFIG_TEGRA_IOMMU_GART ) & & ! mc - > soc - > smmu ) {
mc - > gart = tegra_gart_probe ( & pdev - > dev , mc ) ;
if ( IS_ERR ( mc - > gart ) ) {
dev_err ( & pdev - > dev , " failed to probe GART: %ld \n " ,
PTR_ERR ( mc - > gart ) ) ;
mc - > gart = NULL ;
}
}
return 0 ;
}
static int tegra_mc_suspend ( struct device * dev )
{
struct tegra_mc * mc = dev_get_drvdata ( dev ) ;
int err ;
if ( IS_ENABLED ( CONFIG_TEGRA_IOMMU_GART ) & & mc - > gart ) {
err = tegra_gart_suspend ( mc - > gart ) ;
if ( err )
return err ;
}
2014-04-16 09:24:44 +02:00
return 0 ;
}
2018-12-12 23:38:56 +03:00
static int tegra_mc_resume ( struct device * dev )
{
struct tegra_mc * mc = dev_get_drvdata ( dev ) ;
int err ;
if ( IS_ENABLED ( CONFIG_TEGRA_IOMMU_GART ) & & mc - > gart ) {
err = tegra_gart_resume ( mc - > gart ) ;
if ( err )
return err ;
}
return 0 ;
}
/* Power-management hooks for the MC platform device. */
static const struct dev_pm_ops tegra_mc_pm_ops = {
	.suspend = tegra_mc_suspend,
	.resume = tegra_mc_resume,
};
2014-04-16 09:24:44 +02:00
static struct platform_driver tegra_mc_driver = {
. driver = {
. name = " tegra-mc " ,
. of_match_table = tegra_mc_of_match ,
2018-12-12 23:38:56 +03:00
. pm = & tegra_mc_pm_ops ,
2014-04-16 09:24:44 +02:00
. suppress_bind_attrs = true ,
} ,
. prevent_deferred_probe = true ,
. probe = tegra_mc_probe ,
} ;
static int tegra_mc_init ( void )
{
return platform_driver_register ( & tegra_mc_driver ) ;
}
arch_initcall ( tegra_mc_init ) ;
MODULE_AUTHOR ( " Thierry Reding <treding@nvidia.com> " ) ;
MODULE_DESCRIPTION ( " NVIDIA Tegra Memory Controller driver " ) ;
MODULE_LICENSE ( " GPL v2 " ) ;