/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 NVIDIA Corporation
*/

#ifndef __SOC_TEGRA_MC_H__
#define __SOC_TEGRA_MC_H__

#include <linux/bits.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/interconnect-provider.h>
#include <linux/irq.h>
#include <linux/reset-controller.h>
#include <linux/types.h>

struct clk;
struct device;
struct page;

struct tegra_mc_timing {
	unsigned long rate;

	u32 *emem_data;
};

struct tegra_mc_client {
	unsigned int id;
	const char *name;

	/*
	 * For Tegra210 and earlier, this is the SWGROUP ID used for IOVA
	 * translations in the Tegra SMMU, whereas on Tegra186 and later this
	 * is the ID used to override the ARM SMMU stream ID used for IOVA
	 * translations for the given memory client.
	 */
	union {
		unsigned int swgroup;
		unsigned int sid;
	};

	unsigned int fifo_size;

	struct {
		/* Tegra SMMU enable (Tegra210 and earlier) */
		struct {
			unsigned int reg;
			unsigned int bit;
		} smmu;

		/* latency allowance */
		struct {
			unsigned int reg;
			unsigned int shift;
			unsigned int mask;
			unsigned int def;
		} la;

		/* stream ID overrides (Tegra186 and later) */
		struct {
			unsigned int override;
			unsigned int security;
		} sid;
	} regs;
};
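
/*
 * An SoC driver describes each memory client with one of these entries. A
 * hypothetical sketch of a client table (the ID, name, SWGROUP and register
 * offsets below are made up for illustration, not taken from any real SoC):
 *
 *	static const struct tegra_mc_client tegra_example_mc_clients[] = {
 *		{
 *			.id = 0x01,
 *			.name = "display0a",
 *			.swgroup = 0x1,
 *			.regs = {
 *				.smmu = {
 *					.reg = 0x228,
 *					.bit = 1,
 *				},
 *				.la = {
 *					.reg = 0x2e8,
 *					.shift = 0,
 *					.mask = 0xff,
 *					.def = 0x4e,
 *				},
 *			},
 *		},
 *	};
 */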

struct tegra_smmu_swgroup {
	const char *name;
	unsigned int swgroup;
	unsigned int reg;
};

struct tegra_smmu_group_soc {
	const char *name;
	const unsigned int *swgroups;
	unsigned int num_swgroups;
};

struct tegra_smmu_soc {
	const struct tegra_mc_client *clients;
	unsigned int num_clients;

	const struct tegra_smmu_swgroup *swgroups;
	unsigned int num_swgroups;

	const struct tegra_smmu_group_soc *groups;
	unsigned int num_groups;

	bool supports_round_robin_arbitration;
	bool supports_request_limit;

	unsigned int num_tlb_lines;
	unsigned int num_asids;
};

struct tegra_mc;
struct tegra_smmu;
struct gart_device;

#ifdef CONFIG_TEGRA_IOMMU_SMMU
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc);
void tegra_smmu_remove(struct tegra_smmu *smmu);
#else
static inline struct tegra_smmu *
tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc,
		 struct tegra_mc *mc)
{
	return NULL;
}

static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
{
}
#endif
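
/*
 * A minimal sketch of how the MC driver's probe path might hook up the SMMU,
 * assuming mc->dev and mc->soc->smmu are populated as described above (error
 * handling elided):
 *
 *	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
 *		mc->smmu = tegra_smmu_probe(mc->dev, mc->soc->smmu, mc);
 *		if (IS_ERR(mc->smmu))
 *			mc->smmu = NULL;
 *	}
 */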

#ifdef CONFIG_TEGRA_IOMMU_GART
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc);
int tegra_gart_suspend(struct gart_device *gart);
int tegra_gart_resume(struct gart_device *gart);
#else
static inline struct gart_device *
tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	return ERR_PTR(-ENODEV);
}

static inline int tegra_gart_suspend(struct gart_device *gart)
{
	return -ENODEV;
}

static inline int tegra_gart_resume(struct gart_device *gart)
{
	return -ENODEV;
}
#endif
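
/*
 * A hedged sketch of a system-suspend path using these helpers, assuming
 * mc->gart was set up via tegra_gart_probe() during MC probe (the function
 * name is illustrative):
 *
 *	static int example_mc_suspend(struct tegra_mc *mc)
 *	{
 *		if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart)
 *			return tegra_gart_suspend(mc->gart);
 *
 *		return 0;
 *	}
 */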

struct tegra_mc_reset {
	const char *name;
	unsigned long id;
	unsigned int control;
	unsigned int status;
	unsigned int reset;
	unsigned int bit;
};

struct tegra_mc_reset_ops {
	int (*hotreset_assert)(struct tegra_mc *mc,
			       const struct tegra_mc_reset *rst);
	int (*hotreset_deassert)(struct tegra_mc *mc,
				 const struct tegra_mc_reset *rst);
	int (*block_dma)(struct tegra_mc *mc,
			 const struct tegra_mc_reset *rst);
	bool (*dma_idling)(struct tegra_mc *mc,
			   const struct tegra_mc_reset *rst);
	int (*unblock_dma)(struct tegra_mc *mc,
			   const struct tegra_mc_reset *rst);
	int (*reset_status)(struct tegra_mc *mc,
			    const struct tegra_mc_reset *rst);
};
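
/*
 * These callbacks back the MC's reset controller (see the reset member of
 * struct tegra_mc below). A sketch of the hot-reset ordering they enable,
 * not the kernel's exact code:
 *
 *	ops->block_dma(mc, rst);           // stop new requests from the client
 *	while (!ops->dma_idling(mc, rst))  // wait for in-flight DMA to drain
 *		cpu_relax();
 *	ops->hotreset_assert(mc, rst);     // now safe to assert the reset
 *	// ... module-level reset ...
 *	ops->hotreset_deassert(mc, rst);
 *	ops->unblock_dma(mc, rst);         // allow traffic again
 */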

#define TEGRA_MC_ICC_TAG_DEFAULT	0
#define TEGRA_MC_ICC_TAG_ISO		BIT(0)

struct tegra_mc_icc_ops {
	int (*set)(struct icc_node *src, struct icc_node *dst);
	int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
			 u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
	struct icc_node_data *(*xlate_extended)(struct of_phandle_args *spec,
						void *data);
};
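
/*
 * TEGRA_MC_ICC_TAG_ISO marks bandwidth requests that must be honored
 * isochronously (e.g. display scan-out). A consumer might tag its
 * interconnect path like this (a sketch; "path" is a hypothetical
 * struct icc_path obtained via devm_of_icc_get()):
 *
 *	icc_set_tag(path, TEGRA_MC_ICC_TAG_ISO);
 *	icc_set_bw(path, avg_kbps, peak_kbps);
 */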

struct tegra_mc_ops {
	/*
	 * @probe: Callback to set up SoC-specific bits of the memory
	 * controller. This is called after basic, common set up that is done
	 * by the SoC-agnostic bits.
	 */
	int (*probe)(struct tegra_mc *mc);
	void (*remove)(struct tegra_mc *mc);
	int (*suspend)(struct tegra_mc *mc);
	int (*resume)(struct tegra_mc *mc);
	irqreturn_t (*handle_irq)(int irq, void *data);
	int (*probe_device)(struct tegra_mc *mc, struct device *dev);
};
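
/*
 * SoC drivers provide these through a static table; a hypothetical example
 * (the tegra_example_* symbols are illustrative, not real kernel functions):
 *
 *	static const struct tegra_mc_ops tegra_example_mc_ops = {
 *		.probe = tegra_example_mc_probe,
 *		.handle_irq = tegra_example_mc_irq,
 *	};
 *
 * The table is then referenced from the SoC's struct tegra_mc_soc via .ops.
 */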

struct tegra_mc_soc {
	const struct tegra_mc_client *clients;
	unsigned int num_clients;

	const unsigned long *emem_regs;
	unsigned int num_emem_regs;

	unsigned int num_address_bits;
	unsigned int atom_size;

	u16 client_id_mask;
	u8 num_channels;

	const struct tegra_smmu_soc *smmu;

	u32 intmask;
	u32 ch_intmask;
	u32 global_intstatus_channel_shift;
	bool has_addr_hi_reg;

	const struct tegra_mc_reset_ops *reset_ops;
	const struct tegra_mc_reset *resets;
	unsigned int num_resets;

	const struct tegra_mc_icc_ops *icc_ops;

	const struct tegra_mc_ops *ops;
};

struct tegra_mc {
	struct device *dev;
	struct tegra_smmu *smmu;
	struct gart_device *gart;
	void __iomem *regs;
	void __iomem *bcast_ch_regs;
	void __iomem **ch_regs;
	struct clk *clk;
	int irq;

	const struct tegra_mc_soc *soc;
	unsigned long tick;

	struct tegra_mc_timing *timings;
	unsigned int num_timings;

	struct reset_controller_dev reset;

	struct icc_provider provider;

	spinlock_t lock;

	struct {
		struct dentry *root;
	} debugfs;
};

int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc);
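
/*
 * A hedged sketch of the intended call site: an EMC driver applies the MC
 * side of a memory timing while switching the EMC clock to a new rate
 * (variable names are illustrative):
 *
 *	err = tegra_mc_write_emem_configuration(mc, new_rate);
 *	if (err < 0)
 *		return err;
 */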

#ifdef CONFIG_TEGRA_MC
struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev);
int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev);
#else
static inline struct tegra_mc *
devm_tegra_memory_controller_get(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline int
tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
{
	return -ENODEV;
}
#endif
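
/*
 * A minimal consumer-side sketch, assuming the device's OF node carries a
 * phandle to the memory controller that the helper resolves (error paths
 * shortened, function name illustrative):
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct tegra_mc *mc;
 *
 *		mc = devm_tegra_memory_controller_get(&pdev->dev);
 *		if (IS_ERR(mc))
 *			return PTR_ERR(mc);
 *
 *		...
 *	}
 */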

#endif /* __SOC_TEGRA_MC_H__ */