/*
 * drivers/usb/musb/ux500_dma.c
 *
 * U8500 DMA support code
 *
 * Copyright (C) 2009 STMicroelectronics
 * Copyright (C) 2011 ST-Ericsson SA
 * Authors:
 *	Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
 *	Praveena Nadahally <praveen.nadahally@stericsson.com>
 *	Rajaram Regupathy <ragupathy.rajaram@stericsson.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pfn.h>
#include <linux/sizes.h>
#include <linux/platform_data/usb-musb-ux500.h>

#include "musb_core.h"
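
/*
 * Names used to look up the dmaengine channels for the IN (iep) and
 * OUT (oep) endpoints. Each DMA channel serves an endpoint pair
 * n/n + 8 (hence "iep_1_9"); endpoint 8 has a channel of its own.
 */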
static const char *iep_chan_names[] = { "iep_1_9", "iep_2_10", "iep_3_11",
					"iep_4_12", "iep_5_13", "iep_6_14",
					"iep_7_15", "iep_8" };
static const char *oep_chan_names[] = { "oep_1_9", "oep_2_10", "oep_3_11",
					"oep_4_12", "oep_5_13", "oep_6_14",
					"oep_7_15", "oep_8" };
struct ux500_dma_channel {
	struct dma_channel channel;
	struct ux500_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct dma_chan *dma_chan;
	unsigned int cur_len;
	dma_cookie_t cookie;
	u8 ch_num;
	u8 is_tx;
	u8 is_allocated;
};

struct ux500_dma_controller {
	struct dma_controller controller;
	struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
	struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
	void *private_data;
	dma_addr_t phy_base;
};

/*
 * Transfer-completion handler, called directly from DMA callback
 * context (no work queue) for both rx and tx transfers.
 */
static void ux500_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d\n",
		hw_ep->epnum);

	spin_lock_irqsave(&musb->lock, flags);
	ux500_channel->channel.actual_len = ux500_channel->cur_len;
	ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
	musb_dma_completion(musb, hw_ep->epnum, ux500_channel->is_tx);
	spin_unlock_irqrestore(&musb->lock, flags);
}

static bool ux500_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
	struct dma_chan *dma_chan = ux500_channel->dma_chan;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct scatterlist sg;
	struct dma_slave_config slave_conf;
	enum dma_slave_buswidth addr_width;
	struct musb *musb = ux500_channel->controller->private_data;
	dma_addr_t usb_fifo_addr = (musb->io.fifo_offset(hw_ep->epnum) +
					ux500_channel->controller->phy_base);

	dev_dbg(musb->controller,
		"packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		packet_sz, mode, (unsigned long long) dma_addr,
		len, ux500_channel->is_tx);

	ux500_channel->cur_len = len;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_addr)), len,
					    offset_in_page(dma_addr));
	sg_dma_address(&sg) = dma_addr;
	sg_dma_len(&sg) = len;
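
	/*
	 * The (already DMA-mapped) buffer is described as a single-entry
	 * scatterlist above. Direction follows the endpoint direction, and
	 * the bus width is derived from the length: 32-bit FIFO accesses
	 * when the length is word-aligned, byte accesses otherwise.
	 */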
	direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
					DMA_SLAVE_BUSWIDTH_4_BYTES;
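
	/*
	 * Both the src and dst halves are filled in with the USB FIFO
	 * address; the dmaengine driver applies whichever half matches the
	 * transfer direction set in slave_conf.direction.
	 */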
	slave_conf.direction = direction;
	slave_conf.src_addr = usb_fifo_addr;
	slave_conf.src_addr_width = addr_width;
	slave_conf.src_maxburst = 16;
	slave_conf.dst_addr = usb_fifo_addr;
	slave_conf.dst_addr_width = addr_width;
	slave_conf.dst_maxburst = 16;
	slave_conf.device_fc = false;

	dmaengine_slave_config(dma_chan, &slave_conf);

	dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = ux500_dma_callback;
	dma_desc->callback_param = channel;
	ux500_channel->cookie = dma_desc->tx_submit(dma_desc);

	dma_async_issue_pending(dma_chan);

	return true;
}

static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct ux500_dma_controller *controller = container_of(c,
			struct ux500_dma_controller, controller);
	struct ux500_dma_channel *ux500_channel = NULL;
	struct musb *musb = controller->private_data;
	u8 ch_num = hw_ep->epnum - 1;

	/* 8 DMA channels (0 - 7). Each DMA channel can only be allocated
	 * to a specific hw_ep pair. For example, DMA channel 0 can only be
	 * allocated to hw_ep 1 and 9.
	 */
	if (ch_num > 7)
		ch_num -= 8;

	if (ch_num >= UX500_MUSB_DMA_NUM_RX_TX_CHANNELS)
		return NULL;

	ux500_channel = is_tx ? &(controller->tx_channel[ch_num]) :
			&(controller->rx_channel[ch_num]);

	/* Check if channel is already used. */
	if (ux500_channel->is_allocated)
		return NULL;

	ux500_channel->hw_ep = hw_ep;
	ux500_channel->is_allocated = 1;

	dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
		hw_ep->epnum, is_tx, ch_num);

	return &(ux500_channel->channel);
}

static void ux500_dma_channel_release(struct dma_channel *channel)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct musb *musb = ux500_channel->controller->private_data;

	dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num);

	if (ux500_channel->is_allocated) {
		ux500_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}
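
/*
 * DMA is used only for "compatible" requests: maxpacket, the buffer
 * address and the length must all be word-aligned, and the transfer must
 * be at least 512 bytes; smaller or unaligned requests are left to PIO.
 */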
static int ux500_dma_is_compatible(struct dma_channel *channel,
		u16 maxpacket, void *buf, u32 length)
{
	if ((maxpacket & 0x3) ||
		((unsigned long int) buf & 0x3) ||
		(length < 512) ||
		(length & 0x3))
		return false;
	else
		return true;
}

static int ux500_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
		return false;

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;
	ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int ux500_dma_channel_abort(struct dma_channel *channel)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct ux500_dma_controller *controller = ux500_channel->controller;
	struct musb *musb = controller->private_data;
	void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
	u16 csr;

	dev_dbg(musb->controller, "channel=%d, is_tx=%d\n",
		ux500_channel->ch_num, ux500_channel->is_tx);
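
	/*
	 * Clear the endpoint's DMA-enable bits before asking the dmaengine
	 * driver to terminate, so the controller stops raising DMA requests
	 * for this endpoint while the channel is torn down.
	 */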
	if (channel->status == MUSB_DMA_STATUS_BUSY) {
		if (ux500_channel->is_tx) {
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET |
				 MUSB_TXCSR_DMAENAB |
				 MUSB_TXCSR_DMAMODE);
			musb_writew(epio, MUSB_TXCSR, csr);
		} else {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr &= ~(MUSB_RXCSR_AUTOCLEAR |
				 MUSB_RXCSR_DMAENAB |
				 MUSB_RXCSR_DMAMODE);
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		dmaengine_terminate_all(ux500_channel->dma_chan);
		channel->status = MUSB_DMA_STATUS_FREE;
	}
	return 0;
}
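
/*
 * Release every dmaengine channel requested at start time and mark all
 * MUSB DMA channels free again.
 */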
static void ux500_dma_controller_stop(struct ux500_dma_controller *controller)
{
	struct ux500_dma_channel *ux500_channel;
	struct dma_channel *channel;
	u8 ch_num;

	for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
		channel = &controller->rx_channel[ch_num].channel;
		ux500_channel = channel->private_data;

		ux500_dma_channel_release(channel);

		if (ux500_channel->dma_chan)
			dma_release_channel(ux500_channel->dma_chan);
	}

	for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
		channel = &controller->tx_channel[ch_num].channel;
		ux500_channel = channel->private_data;

		ux500_dma_channel_release(channel);

		if (ux500_channel->dma_chan)
			dma_release_channel(ux500_channel->dma_chan);
	}
}
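
/*
 * Request one dmaengine channel per MUSB rx and tx channel. Lookup is by
 * name first (dma_request_slave_channel(), e.g. via Device Tree
 * "dma-names"), falling back to the board's dma_filter/param pairs from
 * platform data when no named channel is available.
 */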
static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
{
	struct ux500_dma_channel *ux500_channel = NULL;
	struct musb *musb = controller->private_data;
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
	struct ux500_musb_board_data *data;
	struct dma_channel *dma_channel = NULL;
	char **chan_names;
	u32 ch_num;
	u8 dir;
	u8 is_tx = 0;
	void **param_array;
	struct ux500_dma_channel *channel_array;
	dma_cap_mask_t mask;

	if (!plat) {
		dev_err(musb->controller, "No platform data\n");
		return -EINVAL;
	}

	data = plat->board_data;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Prepare the loop for RX channels */
	channel_array = controller->rx_channel;
	param_array = data ? data->dma_rx_param_array : NULL;
	chan_names = (char **)iep_chan_names;

	for (dir = 0; dir < 2; dir++) {
		for (ch_num = 0;
		     ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS;
		     ch_num++) {
			ux500_channel = &channel_array[ch_num];
			ux500_channel->controller = controller;
			ux500_channel->ch_num = ch_num;
			ux500_channel->is_tx = is_tx;

			dma_channel = &(ux500_channel->channel);
			dma_channel->private_data = ux500_channel;
			dma_channel->status = MUSB_DMA_STATUS_FREE;
			dma_channel->max_len = SZ_16M;

			ux500_channel->dma_chan =
				dma_request_slave_channel(dev, chan_names[ch_num]);
			if (!ux500_channel->dma_chan)
				ux500_channel->dma_chan =
					dma_request_channel(mask,
							    data ?
							    data->dma_filter :
							    NULL,
							    param_array ?
							    param_array[ch_num] :
							    NULL);

			if (!ux500_channel->dma_chan) {
				ERR("Dma pipe allocation error dir=%d ch=%d\n",
				    dir, ch_num);

				/* Release already allocated channels */
				ux500_dma_controller_stop(controller);

				return -EBUSY;
			}
		}

		/* Prepare the loop for TX channels */
		channel_array = controller->tx_channel;
		param_array = data ? data->dma_tx_param_array : NULL;
		chan_names = (char **)oep_chan_names;
		is_tx = 1;
	}

	return 0;
}

void dma_controller_destroy(struct dma_controller *c)
{
	struct ux500_dma_controller *controller = container_of(c,
			struct ux500_dma_controller, controller);

	ux500_dma_controller_stop(controller);
	kfree(controller);
}

struct dma_controller *dma_controller_create(struct musb *musb,
					void __iomem *base)
{
	struct ux500_dma_controller *controller;
	struct platform_device *pdev = to_platform_device(musb->controller);
	struct resource *iomem;
	int ret;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	controller->private_data = musb;

	/* Save physical address for DMA controller. */
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		dev_err(musb->controller, "no memory resource defined\n");
		goto plat_get_fail;
	}
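
	/*
	 * phy_base is added to each endpoint's FIFO offset in
	 * ux500_configure_channel() to form the physical FIFO address
	 * handed to the dmaengine slave configuration.
	 */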
	controller->phy_base = (dma_addr_t) iomem->start;

	controller->controller.channel_alloc = ux500_dma_channel_allocate;
	controller->controller.channel_release = ux500_dma_channel_release;
	controller->controller.channel_program = ux500_dma_channel_program;
	controller->controller.channel_abort = ux500_dma_channel_abort;
	controller->controller.is_compatible = ux500_dma_is_compatible;

	ret = ux500_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;

	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	return NULL;
}