// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/dma/fsl-edma.c
 *
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 *
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid and Layerscape SoCs.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>

#include "fsl-edma-common.h"

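/*
 * dmaengine ->device_synchronize() hook: wait for any outstanding
 * descriptor-completion callbacks on the channel to finish.
 */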
static void fsl_edma_synchronize(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	vchan_synchronize(&fsl_chan->vchan);
}

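/*
 * Transfer-complete interrupt handler: acknowledge each pending channel
 * interrupt, complete the active descriptor (or invoke the cyclic callback),
 * and start the next queued descriptor if one is ready.
 */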
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	struct edma_regs *regs = &fsl_edma->regs;
	struct fsl_edma_chan *fsl_chan;

	intr = edma_readl(fsl_edma, regs->intl);
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);

			fsl_chan = &fsl_edma->chans[ch];

			spin_lock(&fsl_chan->vchan.lock);
			if (!fsl_chan->edesc->iscyclic) {
				list_del(&fsl_chan->edesc->vdesc.node);
				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
				fsl_chan->edesc = NULL;
				fsl_chan->status = DMA_COMPLETE;
				fsl_chan->idle = true;
			} else {
				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
			}

			if (!fsl_chan->edesc)
				fsl_edma_xfer_desc(fsl_chan);

			spin_unlock(&fsl_chan->vchan.lock);
		}
	}
	return IRQ_HANDLED;
}

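/*
 * Error interrupt handler: for each channel with an error flagged, stop its
 * hardware request, clear the error, and mark the channel failed and idle.
 */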
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int err, ch;
	struct edma_regs *regs = &fsl_edma->regs;

	err = edma_readl(fsl_edma, regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
			fsl_edma->chans[ch].status = DMA_ERROR;
			fsl_edma->chans[ch].idle = true;
		}
	}
	return IRQ_HANDLED;
}

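/* Combined handler for SoCs that share one line for tx and error interrupts. */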
static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
		return IRQ_HANDLED;

	return fsl_edma_err_handler(irq, dev_id);
}

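/*
 * OF translation: the two "dmas" cells are <mux_group slot>. Pick an unused
 * channel behind the requested DMAMUX instance and route the given request
 * slot to it.
 */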
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
	unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;

	if (dma_spec->args_count != 2)
		return NULL;

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;
		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_chan = to_fsl_edma_chan(chan);
				fsl_chan->slave_id = dma_spec->args[1];
				fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
						true);
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}

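/*
 * Request the "edma-tx" and "edma-err" interrupts. When both names resolve
 * to the same line, install the combined handler instead.
 */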
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	int ret;

	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
	if (fsl_edma->txirq < 0)
		return fsl_edma->txirq;

	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
	if (fsl_edma->errirq < 0)
		return fsl_edma->errirq;

	if (fsl_edma->txirq == fsl_edma->errirq) {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
			return ret;
		}
	}

	return 0;
}

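/*
 * IRQ setup for the eDMA2 block on i.MX7ULP: one interrupt per channel pair
 * plus a trailing error interrupt.
 */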
static int
fsl_edma2_irq_init(struct platform_device *pdev,
		   struct fsl_edma_engine *fsl_edma)
{
	int i, ret, irq;
	int count;

	count = platform_irq_count(pdev);
	dev_dbg(&pdev->dev, "%s Found %d interrupts\n", __func__, count);
	if (count <= 2) {
		dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
		return -EINVAL;
	}
	/*
	 * On i.MX7ULP there are 16 independent channel interrupts plus one
	 * error interrupt, and two channels share each interrupt line (for
	 * example ch0/ch16, ch1/ch17, ...). For now, simply request the irqs
	 * without the IRQF_SHARED flag, since 16 channels are enough on
	 * i.MX7ULP, whose M4 domain owns some of the peripherals.
	 */
	for (i = 0; i < count; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			return -ENXIO;

		sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);

		/* The last IRQ is for eDMA err */
		if (i == count - 1)
			ret = devm_request_irq(&pdev->dev, irq,
						fsl_edma_err_handler,
						0, "eDMA2-ERR", fsl_edma);
		else
			ret = devm_request_irq(&pdev->dev, irq,
						fsl_edma_tx_handler, 0,
						fsl_edma->chans[i].chan_name,
						fsl_edma);
		if (ret)
			return ret;
	}

	return 0;
}

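/* Release the devm-requested IRQs early so no handler runs during teardown. */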
static void fsl_edma_irq_exit(
		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	if (fsl_edma->txirq == fsl_edma->errirq) {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
	} else {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
		devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
	}
}

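/* Disable and unprepare the first nr_clocks DMAMUX clocks (unwind helper). */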
static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
	int i;

	for (i = 0; i < nr_clocks; i++)
		clk_disable_unprepare(fsl_edma->muxclk[i]);
}

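/*
 * Per-SoC driver data: eDMA block revision, number of DMAMUX instances, an
 * optional block clock, and the IRQ setup routine to use.
 */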
static struct fsl_edma_drvdata vf610_data = {
	.version = v1,
	.dmamuxs = DMAMUX_NR,
	.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata imx7ulp_data = {
	.version = v3,
	.dmamuxs = 1,
	.has_dmaclk = true,
	.setup_irq = fsl_edma2_irq_init,
};

static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", .data = &vf610_data },
	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

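/*
 * Probe: map the eDMA registers, enable the block and DMAMUX clocks, set up
 * per-channel state and interrupts, then register the dmaengine device and
 * the OF DMA controller.
 */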
static int fsl_edma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(fsl_edma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma;
	const struct fsl_edma_drvdata *drvdata = NULL;
	struct fsl_edma_chan *fsl_chan;
	struct edma_regs *regs;
	struct resource *res;
	int len, chans;
	int ret, i;

	if (of_id)
		drvdata = of_id->data;
	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
	fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_edma)
		return -ENOMEM;

	fsl_edma->drvdata = drvdata;
	fsl_edma->n_chans = chans;
	mutex_init(&fsl_edma->fsl_edma_mutex);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_edma->membase))
		return PTR_ERR(fsl_edma->membase);

	fsl_edma_setup_regs(fsl_edma);
	regs = &fsl_edma->regs;

	if (drvdata->has_dmaclk) {
		fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
		if (IS_ERR(fsl_edma->dmaclk)) {
			dev_err(&pdev->dev, "Missing DMA block clock.\n");
			return PTR_ERR(fsl_edma->dmaclk);
		}

		ret = clk_prepare_enable(fsl_edma->dmaclk);
		if (ret) {
			dev_err(&pdev->dev, "DMA clk block failed.\n");
			return ret;
		}
	}

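	/* Map and clock each DMAMUX instance that routes requests to this eDMA. */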
	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
		char clkname[32];

		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
		fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(fsl_edma->muxbase[i])) {
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxbase[i]);
		}

		sprintf(clkname, "dmamux%d", i);
		fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
		if (IS_ERR(fsl_edma->muxclk[i])) {
			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxclk[i]);
		}

		ret = clk_prepare_enable(fsl_edma->muxclk[i]);
		if (ret) {
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return ret;
		}
	}

	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
	for (i = 0; i < fsl_edma->n_chans; i++) {
		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

		fsl_chan->edma = fsl_edma;
		fsl_chan->pm_state = RUNNING;
		fsl_chan->slave_id = 0;
		fsl_chan->idle = true;
		fsl_chan->dma_dir = DMA_NONE;
		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
		fsl_edma_chan_mux(fsl_chan, 0, false);
	}

	edma_writel(fsl_edma, ~0, regs->intl);
	ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);

	fsl_edma->dma_dev.dev = &pdev->dev;
	fsl_edma->dma_dev.device_alloc_chan_resources
		= fsl_edma_alloc_chan_resources;
	fsl_edma->dma_dev.device_free_chan_resources
		= fsl_edma_free_chan_resources;
	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	platform_set_drvdata(pdev, fsl_edma);

	ret = dma_async_device_register(&fsl_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
		return ret;
	}

	ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
		dma_async_device_unregister(&fsl_edma->dma_dev);
		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
		return ret;
	}

	/* enable round robin arbitration */
	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

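/*
 * Remove: quiesce interrupts, tear down the virtual channels, unregister the
 * OF DMA controller and dmaengine device, and disable the DMAMUX clocks.
 */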
static int fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);

	fsl_edma_irq_exit(pdev, fsl_edma);
	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);
	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);

	return 0;
}

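/*
 * Late-suspend hook: warn about and force-disable any channel that is not
 * idle, then mark every channel SUSPENDED.
 */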
static int fsl_edma_suspend_late(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	unsigned long flags;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		/* Make sure the channel is idle, or force-disable it. */
		if (unlikely(!fsl_chan->idle)) {
			dev_warn(dev, "WARN: There is a non-idle channel.");
			fsl_edma_disable_request(fsl_chan);
			fsl_edma_chan_mux(fsl_chan, 0, false);
		}

		fsl_chan->pm_state = SUSPENDED;
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	}

	return 0;
}

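/*
 * Early-resume hook: mark every channel RUNNING again, restore its TCD CSR
 * and DMAMUX routing, and re-enable round-robin arbitration.
 */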
static int fsl_edma_resume_early(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	struct edma_regs *regs = &fsl_edma->regs;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		fsl_chan->pm_state = RUNNING;
		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
		if (fsl_chan->slave_id != 0)
			fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
	}

	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

/*
 * eDMA provides services to other devices, so it should suspend late and
 * resume early. When eDMA is suspended, all of its clients should have
 * stopped their DMA transfers and left their channels idle.
 */
static const struct dev_pm_ops fsl_edma_pm_ops = {
	.suspend_late   = fsl_edma_suspend_late,
	.resume_early   = fsl_edma_resume_early,
};

static struct platform_driver fsl_edma_driver = {
	.driver		= {
		.name	= "fsl-edma",
		.of_match_table = fsl_edma_dt_ids,
		.pm     = &fsl_edma_pm_ops,
	},
	.probe          = fsl_edma_probe,
	.remove		= fsl_edma_remove,
};

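/* Register at subsys_initcall time so channels exist before clients probe. */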
static int __init fsl_edma_init(void)
{
	return platform_driver_register(&fsl_edma_driver);
}
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
	platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");