// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#include "bus.h"
#include "channel.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
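
/*
 * Accessors for the hypervisor register space, which only exists on SoCs
 * whose host1x_info has has_hypervisor set (Tegra186 and later).
 */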
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
        writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
        return readl(host1x->hv_regs + r);
}
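
/* The syncpoint block sits at an SoC-specific offset (info->sync_offset). */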
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
        void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

        writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
        void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

        return readl(sync_regs + r);
}
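
/* Accessors for the per-channel register ranges. */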
void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
        writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
        return readl(ch->regs + r);
}
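
/* Per-SoC capabilities and configuration, selected via the OF match table. */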
static const struct host1x_info host1x01_info = {
        .nb_channels = 8,
        .nb_pts = 32,
        .nb_mlocks = 16,
        .nb_bases = 8,
        .init = host1x01_init,
        .sync_offset = 0x3000,
        .dma_mask = DMA_BIT_MASK(32),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
};

static const struct host1x_info host1x02_info = {
        .nb_channels = 9,
        .nb_pts = 32,
        .nb_mlocks = 16,
        .nb_bases = 12,
        .init = host1x02_init,
        .sync_offset = 0x3000,
        .dma_mask = DMA_BIT_MASK(32),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
};

static const struct host1x_info host1x04_info = {
        .nb_channels = 12,
        .nb_pts = 192,
        .nb_mlocks = 16,
        .nb_bases = 64,
        .init = host1x04_init,
        .sync_offset = 0x2100,
        .dma_mask = DMA_BIT_MASK(34),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
};

static const struct host1x_info host1x05_info = {
        .nb_channels = 14,
        .nb_pts = 192,
        .nb_mlocks = 16,
        .nb_bases = 64,
        .init = host1x05_init,
        .sync_offset = 0x2100,
        .dma_mask = DMA_BIT_MASK(34),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
        {
                /* VIC */
                .base = 0x1af0,
                .offset = 0x30,
                .limit = 0x34
        },
};

static const struct host1x_info host1x06_info = {
        .nb_channels = 63,
        .nb_pts = 576,
        .nb_mlocks = 24,
        .nb_bases = 16,
        .init = host1x06_init,
        .sync_offset = 0x0,
        .dma_mask = DMA_BIT_MASK(40),
        .has_wide_gather = true,
        .has_hypervisor = true,
        .num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
        .sid_table = tegra186_sid_table,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
        {
                /* VIC */
                .base = 0x1af0,
                .offset = 0x30,
                .limit = 0x34
        },
};

static const struct host1x_info host1x07_info = {
        .nb_channels = 63,
        .nb_pts = 704,
        .nb_mlocks = 32,
        .nb_bases = 0,
        .init = host1x07_init,
        .sync_offset = 0x0,
        .dma_mask = DMA_BIT_MASK(40),
        .has_wide_gather = true,
        .has_hypervisor = true,
        .num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
        .sid_table = tegra194_sid_table,
};

static const struct of_device_id host1x_of_match[] = {
        { .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
        { .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
        { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
        { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
        { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
        { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
        { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
        { },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);
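
/*
 * Program the stream ID table through the hypervisor register space so that
 * memory transactions from the listed client modules carry the right ID.
 */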
static void host1x_setup_sid_table(struct host1x *host)
{
        const struct host1x_info *info = host->info;
        unsigned int i;

        for (i = 0; i < info->num_sid_entries; i++) {
                const struct host1x_sid_entry *entry = &info->sid_table[i];

                host1x_hypervisor_writel(host, entry->offset, entry->base);
                host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
        }
}
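
/* Check whether host1x should use the IOMMU; the rationale is given below. */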
static bool host1x_wants_iommu(struct host1x *host1x)
{
        /*
         * If we support addressing a maximum of 32 bits of physical memory
         * and if the host1x firewall is enabled, there's no need to enable
         * IOMMU support. This can happen for example on Tegra20, Tegra30
         * and Tegra114.
         *
         * Tegra124 and later can address up to 34 bits of physical memory and
         * many platforms come equipped with more than 2 GiB of system memory,
         * which requires crossing the 4 GiB boundary. But there's a catch: on
         * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
         * only address up to 32 bits of memory in GATHER opcodes, which means
         * that command buffers need to either be in the first 2 GiB of system
         * memory (which could quickly lead to memory exhaustion), or command
         * buffers need to be treated differently from other buffers (which is
         * not possible with the current ABI).
         *
         * A third option is to use the IOMMU in these cases to make sure all
         * buffers will be mapped into a 32-bit IOVA space that host1x can
         * address. This allows all of the system memory to be used and works
         * within the limitations of the host1x on these SoCs.
         *
         * In summary, default to enable IOMMU on Tegra124 and later. For any
         * of the earlier SoCs, only use the IOMMU for additional safety when
         * the host1x firewall is disabled.
         */
        if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
                if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
                        return false;
        }

        return true;
}
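
/*
 * Attach host1x to an IOMMU domain and set up an IOVA allocator for it,
 * unless the DMA API already manages a domain for the device. Returns the
 * domain in use, NULL when running without an IOMMU, or an ERR_PTR() on
 * failure.
 */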
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
        int err;

        /*
         * We may not always want to enable IOMMU support (for example if the
         * host1x firewall is already enabled and we don't support addressing
         * more than 32 bits of physical memory), so check for that first.
         *
         * Similarly, if host1x is already attached to an IOMMU (via the DMA
         * API), don't try to attach again.
         */
        if (!host1x_wants_iommu(host) || domain)
                return domain;

        host->group = iommu_group_get(host->dev);
        if (host->group) {
                struct iommu_domain_geometry *geometry;
                dma_addr_t start, end;
                unsigned long order;

                err = iova_cache_get();
                if (err < 0)
                        goto put_group;

                host->domain = iommu_domain_alloc(&platform_bus_type);
                if (!host->domain) {
                        err = -ENOMEM;
                        goto put_cache;
                }

                err = iommu_attach_group(host->domain, host->group);
                if (err) {
                        if (err == -ENODEV)
                                err = 0;

                        goto free_domain;
                }

                geometry = &host->domain->geometry;
                start = geometry->aperture_start & host->info->dma_mask;
                end = geometry->aperture_end & host->info->dma_mask;

                order = __ffs(host->domain->pgsize_bitmap);
                init_iova_domain(&host->iova, 1UL << order, start >> order);
                host->iova_end = end;

                domain = host->domain;
        }

        return domain;

free_domain:
        iommu_domain_free(host->domain);
        host->domain = NULL;
put_cache:
        iova_cache_put();
put_group:
        iommu_group_put(host->group);
        host->group = NULL;

        return ERR_PTR(err);
}
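
/* Set up IOMMU translation (if wanted) and derive the DMA mask from it. */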
static int host1x_iommu_init(struct host1x *host)
{
        u64 mask = host->info->dma_mask;
        struct iommu_domain *domain;
        int err;

        domain = host1x_iommu_attach(host);
        if (IS_ERR(domain)) {
                err = PTR_ERR(domain);
                dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
                return err;
        }

        /*
         * If we're not behind an IOMMU make sure we don't get push buffers
         * that are allocated outside of the range addressable by the GATHER
         * opcode.
         *
         * Newer generations of Tegra (Tegra186 and later) support a wide
         * variant of the GATHER opcode that allows addressing more bits.
         */
        if (!domain && !host->info->has_wide_gather)
                mask = DMA_BIT_MASK(32);

        err = dma_coerce_mask_and_coherent(host->dev, mask);
        if (err < 0) {
                dev_err(host->dev, "failed to set DMA mask: %d\n", err);
                return err;
        }

        return 0;
}
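
/* Tear down the explicit IOMMU setup created by host1x_iommu_attach(). */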
static void host1x_iommu_exit(struct host1x *host)
{
        if (host->domain) {
                put_iova_domain(&host->iova);
                iommu_detach_group(host->domain, host->group);
                iommu_domain_free(host->domain);
                host->domain = NULL;

                iova_cache_put();

                iommu_group_put(host->group);
                host->group = NULL;
        }
}
static int host1x_probe(struct platform_device *pdev)
{
        struct host1x *host;
        struct resource *regs, *hv_regs = NULL;
        int syncpt_irq;
        int err;

        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        host->info = of_device_get_match_data(&pdev->dev);

        if (host->info->has_hypervisor) {
                regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
                if (!regs) {
                        dev_err(&pdev->dev, "failed to get vm registers\n");
                        return -ENXIO;
                }

                hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                       "hypervisor");
                if (!hv_regs) {
                        dev_err(&pdev->dev,
                                "failed to get hypervisor registers\n");
                        return -ENXIO;
                }
        } else {
                regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                if (!regs) {
                        dev_err(&pdev->dev, "failed to get registers\n");
                        return -ENXIO;
                }
        }

        syncpt_irq = platform_get_irq(pdev, 0);
        if (syncpt_irq < 0)
                return syncpt_irq;

        mutex_init(&host->devices_lock);
        INIT_LIST_HEAD(&host->devices);
        INIT_LIST_HEAD(&host->list);
        host->dev = &pdev->dev;

        /* set common host1x device data */
        platform_set_drvdata(pdev, host);

        host->regs = devm_ioremap_resource(&pdev->dev, regs);
        if (IS_ERR(host->regs))
                return PTR_ERR(host->regs);

        if (host->info->has_hypervisor) {
                host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
                if (IS_ERR(host->hv_regs))
                        return PTR_ERR(host->hv_regs);
        }

        host->dev->dma_parms = &host->dma_parms;
        dma_set_max_seg_size(host->dev, UINT_MAX);

        if (host->info->init) {
                err = host->info->init(host);
                if (err)
                        return err;
        }

        host->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                err = PTR_ERR(host->clk);

                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "failed to get clock: %d\n", err);

                return err;
        }

        host->rst = devm_reset_control_get(&pdev->dev, "host1x");
        if (IS_ERR(host->rst)) {
                err = PTR_ERR(host->rst);
                dev_err(&pdev->dev, "failed to get reset: %d\n", err);
                return err;
        }

        err = host1x_iommu_init(host);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
                return err;
        }

        err = host1x_channel_list_init(&host->channel_list,
                                       host->info->nb_channels);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize channel list\n");
                goto iommu_exit;
        }

        err = clk_prepare_enable(host->clk);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to enable clock\n");
                goto free_channels;
        }

        err = reset_control_deassert(host->rst);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
                goto unprepare_disable;
        }

        err = host1x_syncpt_init(host);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize syncpts\n");
                goto reset_assert;
        }

        err = host1x_intr_init(host, syncpt_irq);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize interrupts\n");
                goto deinit_syncpt;
        }

        host1x_debug_init(host);

        if (host->info->has_hypervisor)
                host1x_setup_sid_table(host);

        err = host1x_register(host);
        if (err < 0)
                goto deinit_debugfs;

        err = devm_of_platform_populate(&pdev->dev);
        if (err < 0)
                goto unregister;

        return 0;

unregister:
        host1x_unregister(host);
deinit_debugfs:
        host1x_debug_deinit(host);
        host1x_intr_deinit(host);
deinit_syncpt:
        host1x_syncpt_deinit(host);
reset_assert:
        reset_control_assert(host->rst);
unprepare_disable:
        clk_disable_unprepare(host->clk);
free_channels:
        host1x_channel_list_free(&host->channel_list);
iommu_exit:
        host1x_iommu_exit(host);

        return err;
}
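
/* Undo host1x_probe() in reverse order. */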
static int host1x_remove(struct platform_device *pdev)
{
        struct host1x *host = platform_get_drvdata(pdev);

        host1x_unregister(host);
        host1x_debug_deinit(host);
        host1x_intr_deinit(host);
        host1x_syncpt_deinit(host);
        reset_control_assert(host->rst);
        clk_disable_unprepare(host->clk);
        host1x_iommu_exit(host);

        return 0;
}

static struct platform_driver tegra_host1x_driver = {
        .driver = {
                .name = "tegra-host1x",
                .of_match_table = host1x_of_match,
        },
        .probe = host1x_probe,
        .remove = host1x_remove,
};

static struct platform_driver *const drivers[] = {
        &tegra_host1x_driver,
        &tegra_mipi_driver,
};
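
/*
 * The host1x bus type must be registered before the platform drivers: once
 * host1x probes, host1x_register() starts adding client devices to that bus.
 */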
static int __init tegra_host1x_init(void)
{
        int err;

        err = bus_register(&host1x_bus_type);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                bus_unregister(&host1x_bus_type);

        return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
        return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");