// SPDX-License-Identifier: GPL-1.0+
/*
 * Renesas USB driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include "common.h"
#include "pipe.h"

#define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))

#define usbhsf_is_cfifo(p, f)	(usbhsf_get_cfifo(p) == f)

#define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */

/*
 * packet initialize
 */
void usbhs_pkt_init(struct usbhs_pkt *pkt)
{
	INIT_LIST_HEAD(&pkt->node);
}

/*
 * packet control function
 */
static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
	struct device *dev = usbhs_priv_to_dev(priv);

	dev_err(dev, "null handler\n");

	return -EINVAL;
}

static const struct usbhs_pkt_handle usbhsf_null_handler = {
	.prepare = usbhsf_null_handle,
	.try_run = usbhsf_null_handle,
};

void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
		    void (*done)(struct usbhs_priv *priv,
				 struct usbhs_pkt *pkt),
		    void *buf, int len, int zero, int sequence)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	unsigned long flags;

	if (!done) {
		dev_err(dev, "no done function\n");
		return;
	}

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	if (!pipe->handler) {
		dev_err(dev, "no handler function\n");
		pipe->handler = &usbhsf_null_handler;
	}

	list_move_tail(&pkt->node, &pipe->list);

	/*
	 * each pkt must hold its own handler, because the handler
	 * may be changed depending on the situation
	 * (e.g. dma handler -> pio handler).
	 */
	pkt->pipe	= pipe;
	pkt->buf	= buf;
	pkt->handler	= pipe->handler;
	pkt->length	= len;
	pkt->zero	= zero;
	pkt->actual	= 0;
	pkt->done	= done;
	pkt->sequence	= sequence;

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/
}
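
/*
 * Illustrative usage (a sketch, not code taken from this file): a mod layer
 * (gadget/host) typically queues a transfer with usbhs_pkt_push() and then
 * kicks the pipe with usbhs_pkt_start(). "done_cb" below is a hypothetical
 * completion callback supplied by that caller:
 *
 *	usbhs_pkt_push(pipe, pkt, done_cb, buf, len, 0, -1);
 *	usbhs_pkt_start(pipe);
 */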

static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
{
	list_del_init(&pkt->node);
}

static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
	return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
}

static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo);
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt);

#define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);

struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	unsigned long flags;

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	usbhs_pipe_disable(pipe);

	if (!pkt)
		pkt = __usbhsf_pkt_get(pipe);

	if (pkt) {
		struct dma_chan *chan = NULL;

		if (fifo)
			chan = usbhsf_dma_chan_get(fifo, pkt);
		if (chan) {
			dmaengine_terminate_all(chan);
			usbhsf_dma_unmap(pkt);
		}

		usbhs_pipe_clear_without_sequence(pipe, 0, 0);

		__usbhsf_pkt_del(pkt);
	}

	if (fifo)
		usbhsf_fifo_unselect(pipe, fifo);

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	return pkt;
}

enum {
	USBHSF_PKT_PREPARE,
	USBHSF_PKT_TRY_RUN,
	USBHSF_PKT_DMA_DONE,
};

static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pkt *pkt;
	struct device *dev = usbhs_priv_to_dev(priv);
	int (*func)(struct usbhs_pkt *pkt, int *is_done);
	unsigned long flags;
	int ret = 0;
	int is_done = 0;

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	pkt = __usbhsf_pkt_get(pipe);
	if (!pkt)
		goto __usbhs_pkt_handler_end;

	switch (type) {
	case USBHSF_PKT_PREPARE:
		func = pkt->handler->prepare;
		break;
	case USBHSF_PKT_TRY_RUN:
		func = pkt->handler->try_run;
		break;
	case USBHSF_PKT_DMA_DONE:
		func = pkt->handler->dma_done;
		break;
	default:
		dev_err(dev, "unknown pkt handler\n");
		goto __usbhs_pkt_handler_end;
	}

	if (likely(func))
		ret = func(pkt, &is_done);

	if (is_done)
		__usbhsf_pkt_del(pkt);

__usbhs_pkt_handler_end:
	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	if (is_done) {
		pkt->done(priv, pkt);
		usbhs_pkt_start(pipe);
	}

	return ret;
}

void usbhs_pkt_start(struct usbhs_pipe *pipe)
{
	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
}
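
/*
 * Note (summary of the flow above): usbhs_pkt_start() drives the queued
 * packet through its handler's ->prepare callback; subsequent BRDY/BEMP
 * interrupts call ->try_run and the DMA completion callback calls
 * ->dma_done, all via usbhsf_pkt_handler(). Once a handler reports is_done,
 * the packet is removed from the pipe list, its ->done callback runs, and
 * the next queued packet is started.
 */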

/*
 * irq enable/disable function
 */
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
#define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
	({								\
		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
		u16 status = (1 << usbhs_pipe_number(pipe));		\
		if (!mod)						\
			return;						\
		if (enable)						\
			mod->status |= status;				\
		else							\
			mod->status &= ~status;				\
		usbhs_irq_callback_update(priv, mod);			\
	})

static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	/*
	 * The DCP pipe can NOT use the "ready interrupt" for "send";
	 * it should use the "empty" interrupt.
	 * see
	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
	 *
	 * On the other hand, a normal pipe can use the "ready interrupt"
	 * for "send" even though it is single/double buffered.
	 */
	if (usbhs_pipe_is_dcp(pipe))
		usbhsf_irq_empty_ctrl(pipe, enable);
	else
		usbhsf_irq_ready_ctrl(pipe, enable);
}

static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	usbhsf_irq_ready_ctrl(pipe, enable);
}

/*
 * FIFO ctrl
 */
static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
				   struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
}

static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	/* The FIFO port is accessible */
	if (usbhs_read(priv, fifo->ctr) & FRDY)
		return 0;

	return -EBUSY;
}

static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	int ret = 0;

	if (!usbhs_pipe_is_dcp(pipe)) {
		/*
		 * This driver checks the pipe condition first to avoid -EBUSY
		 * from usbhsf_fifo_barrier() if the pipe is RX direction and
		 * empty.
		 */
		if (usbhs_pipe_is_dir_in(pipe))
			ret = usbhs_pipe_is_accessible(pipe);
		if (!ret)
			ret = usbhsf_fifo_barrier(priv, fifo);
	}

	/*
	 * if non-DCP pipe, this driver should set BCLR when
	 * usbhsf_fifo_barrier() returns 0.
	 */
	if (!ret)
		usbhs_write(priv, fifo->ctr, BCLR);
}

static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
}

static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_pipe_select_fifo(pipe, NULL);
	usbhs_write(priv, fifo->sel, 0);
}

static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      int write)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int timeout = 1024;
	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */

	if (usbhs_pipe_is_busy(pipe) ||
	    usbhsf_fifo_is_busy(fifo))
		return -EBUSY;

	if (usbhs_pipe_is_dcp(pipe)) {
		base |= (1 == write) << 5;	/* ISEL */

		if (usbhs_mod_is_host(priv))
			usbhs_dcp_dir_for_host(pipe, write);
	}

	/* "base" will be used below */
	if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
		usbhs_write(priv, fifo->sel, base);
	else
		usbhs_write(priv, fifo->sel, base | MBW_32);

	/* check ISEL and CURPIPE value */
	while (timeout--) {
		if (base == (mask & usbhs_read(priv, fifo->sel))) {
			usbhs_pipe_select_fifo(pipe, fifo);
			return 0;
		}
		udelay(10);
	}

	dev_err(dev, "fifo select error\n");

	return -EIO;
}
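
/*
 * Note on usbhsf_fifo_select() above: the written CURPIPE/ISEL value does
 * not take effect immediately, so the function polls FIFOSEL up to 1024
 * times with a 10us delay between reads and gives up with -EIO if the
 * selection never latches.
 */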

/*
 * DCP status stage
 */
static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_send_terminator(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}

static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_rx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}

static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
		usbhsf_tx_irq_ctrl(pipe, 0);
	else
		usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->actual = pkt->length;
	*is_done = 1;

	return 0;
}

const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
	.prepare = usbhs_dcp_dir_switch_to_write,
	.try_run = usbhs_dcp_dir_switch_done,
};

const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
	.prepare = usbhs_dcp_dir_switch_to_read,
	.try_run = usbhs_dcp_dir_switch_done,
};
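
/*
 * Note on the two status-stage handlers above: both force the data toggle
 * to DATA1. The IN (write) direction additionally sets BVAL via
 * usbhsf_send_terminator() on the freshly cleared CFIFO, which in effect
 * hands the controller a zero-length packet for the status stage.
 */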

/*
 * DCP data stage (push)
 */
static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	/*
	 * change handler to PIO push
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
	.prepare = usbhsf_dcp_data_stage_try_push,
};

/*
 * DCP data stage (pop)
 */
static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/*
	 * prepare pop for DCP should
	 *  - change DCP direction,
	 *  - clear fifo
	 *  - DATA1
	 */
	usbhs_pipe_disable(pipe);

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_select(pipe, fifo, 0);
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * change handler to PIO pop
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->prepare(pkt, is_done);
}

const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
	.prepare = usbhsf_dcp_data_stage_prepare_pop,
};

/*
 * PIO push handler
 */
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int total_len;
	int i, ret, len;
	int is_short;

	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0)
		return 0;

	ret = usbhs_pipe_is_accessible(pipe);
	if (ret < 0) {
		/* inaccessible pipe is not an error */
		ret = 0;
		goto usbhs_fifo_write_busy;
	}

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_write_busy;

	buf		= pkt->buf    + pkt->actual;
	len		= pkt->length - pkt->actual;
	len		= min(len, maxp);
	total_len	= len;
	is_short	= total_len < maxp;

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		iowrite32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* the rest operation */
	for (i = 0; i < len; i++)
		iowrite8(buf[i], addr + (0x03 - (i & 0x03)));

	/*
	 * variable update
	 */
	pkt->actual += total_len;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is remaining data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	/*
	 * pipe/irq handling
	 */
	if (is_short)
		usbhsf_send_terminator(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, !*is_done);
	usbhs_pipe_running(pipe, !*is_done);
	usbhs_pipe_enable(pipe);

	dev_dbg(dev, "send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

	usbhsf_fifo_unselect(pipe, fifo);

	return 0;

usbhs_fifo_write_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * pipe is busy.
	 * retry in interrupt
	 */
	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_running(pipe, 1);

	return ret;
}

static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	if (usbhs_pipe_is_running(pkt->pipe))
		return 0;

	return usbhsf_pio_try_push(pkt, is_done);
}

const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
	.prepare = usbhsf_pio_prepare_push,
	.try_run = usbhsf_pio_try_push,
};

/*
 * PIO pop handler
 */
static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_running(pipe))
		return 0;

	/*
	 * enable the pipe to prepare for packet receive
	 */
	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	if (usbhs_pipe_is_dcp(pipe))
		usbhsf_fifo_clear(pipe, fifo);

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
	usbhs_pipe_enable(pipe);
	usbhs_pipe_running(pipe, 1);
	usbhsf_rx_irq_ctrl(pipe, 1);

	return 0;
}

static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	u32 data = 0;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int rcv_len, len;
	int i, ret;
	int total_len = 0;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		return 0;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_read_busy;

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);

	buf		= pkt->buf    + pkt->actual;
	len		= pkt->length - pkt->actual;
	len		= min(len, rcv_len);
	total_len	= len;

	/*
	 * update the actual length first here to decide whether to
	 * disable the pipe.
	 * if this pipe keeps the BUF status and all data were popped,
	 * then the next interrupt/token will be issued again
	 */
	pkt->actual += total_len;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (total_len < maxp)) {		/* short packet */
		*is_done = 1;
		usbhsf_rx_irq_ctrl(pipe, 0);
		usbhs_pipe_running(pipe, 0);
		/*
		 * If function mode, since this controller may enter the
		 * Control Write status stage at this timing, this driver
		 * should not disable the pipe. If such a case happens,
		 * this controller is not able to complete the status stage.
		 */
		if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
			usbhs_pipe_disable(pipe);	/* disable pipe first */
	}

	/*
	 * Buffer clear if Zero-Length packet
	 *
	 * see
	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
	 */
	if (0 == rcv_len) {
		pkt->zero = 1;
		usbhsf_fifo_clear(pipe, fifo);
		goto usbhs_fifo_read_end;
	}

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		ioread32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* the rest operation */
	for (i = 0; i < len; i++) {
		if (!(i & 0x03))
			data = ioread32(addr);

		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
	}

usbhs_fifo_read_end:
	dev_dbg(dev, "recv %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

usbhs_fifo_read_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	return ret;
}

const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
	.prepare = usbhsf_prepare_pop,
	.try_run = usbhsf_pio_try_pop,
};

/*
 * DCP control stage handler
 */
static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
{
	usbhs_dcp_control_transfer_done(pkt->pipe);

	*is_done = 1;

	return 0;
}

const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
	.prepare = usbhsf_ctrl_stage_end,
	.try_run = usbhsf_ctrl_stage_end,
};

/*
 * DMA fifo functions
 */
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt)
{
	if (&usbhs_fifo_dma_push_handler == pkt->handler)
		return fifo->tx_chan;

	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
		return fifo->rx_chan;

	return NULL;
}

static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
					      struct usbhs_pkt *pkt)
{
	struct usbhs_fifo *fifo;
	int i;

	usbhs_for_each_dfifo(priv, fifo, i) {
		if (usbhsf_dma_chan_get(fifo, pkt) &&
		    !usbhsf_fifo_is_busy(fifo))
			return fifo;
	}

	return NULL;
}

#define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      u16 dreqe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}

static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);

	return info->dma_map_ctrl(chan->device->dev, pkt, map);
}

static void usbhsf_dma_complete(void *arg);
static void xfer_work(struct work_struct *work)
{
	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_fifo *fifo;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	struct device *dev = usbhs_priv_to_dev(priv);
	enum dma_transfer_direction dir;
	unsigned long flags;

	usbhs_lock(priv, flags);
	fifo = usbhs_pipe_to_fifo(pipe);
	if (!fifo)
		goto xfer_work_end;

	chan = usbhsf_dma_chan_get(fifo, pkt);
	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
					pkt->trans, dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto xfer_work_end;

	desc->callback		= usbhsf_dma_complete;
	desc->callback_param	= pipe;

	pkt->cookie = dmaengine_submit(desc);
	if (pkt->cookie < 0) {
		dev_err(dev, "Failed to submit dma descriptor\n");
		goto xfer_work_end;
	}

	dev_dbg(dev, "%s %d (%d/ %d)\n",
		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);

	usbhs_pipe_running(pipe, 1);
	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
	dma_async_issue_pending(chan);
	usbhsf_dma_start(pipe, fifo);
	usbhs_pipe_enable(pipe);

xfer_work_end:
	usbhs_unlock(priv, flags);
}
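
/*
 * Note on xfer_work() above: it runs in workqueue context and, under the
 * driver lock, prepares and submits a dmaengine slave descriptor for
 * pkt->trans bytes, programs the transfer count, issues the descriptor,
 * raises DREQE via usbhsf_dma_start() and finally enables the pipe.
 */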

/*
 * DMA push handler
 */
static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len = pkt->length - pkt->actual;
	int ret;
	uintptr_t align_mask;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
		goto usbhsf_pio_prepare_push;

	/* check data length if this driver doesn't use USB-DMAC */
	if (!usbhs_get_dparam(priv, has_usb_dmac) && len & 0x7)
		goto usbhsf_pio_prepare_push;

	/* check buffer alignment */
	align_mask = usbhs_get_dparam(priv, has_usb_dmac) ?
					USBHS_USB_DMAC_XFER_SIZE - 1 : 0x7;
	if ((uintptr_t)(pkt->buf + pkt->actual) & align_mask)
		goto usbhsf_pio_prepare_push;

	/* return at this time if the pipe is running */
	if (usbhs_pipe_is_running(pipe))
		return 0;

	/* get an enabled DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_push;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_push;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_push_unselect;

	pkt->trans = len;

	usbhsf_tx_irq_ctrl(pipe, 0);
	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_push_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_push:
	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);

	pkt->actual += pkt->trans;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is remaining data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	usbhs_pipe_running(pipe, !*is_done);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	if (!*is_done) {
		/* change handler to PIO */
		pkt->handler = &usbhs_fifo_pio_push_handler;
		return pkt->handler->try_run(pkt, is_done);
	}

	return 0;
}

const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
	.prepare = usbhsf_dma_prepare_push,
	.dma_done = usbhsf_dma_push_done,
};
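
/*
 * Note (summary of usbhsf_dma_prepare_push() above): DMA is only attempted
 * for transfers of at least pio_dma_border bytes, with suitably aligned
 * buffers (8 bytes, or USBHS_USB_DMAC_XFER_SIZE when a USB-DMAC is used),
 * and when a free DFIFO with a DMA channel can be selected and mapped; in
 * every other case the packet quietly falls back to the PIO push handler.
 */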

/*
 * DMA pop handler
 */

static int usbhsf_dma_prepare_pop_with_rx_irq(struct usbhs_pkt *pkt,
					      int *is_done)
{
	return usbhsf_prepare_pop(pkt, is_done);
}

static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
						int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
	if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
		goto usbhsf_pio_prepare_pop;

	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
		goto usbhsf_pio_prepare_pop;

	usbhs_pipe_config_change_bfre(pipe, 1);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare
	 * enabled the irq to get here, but it is no longer
	 * needed for DMA. disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = pkt->length;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;
	usbhs_pipe_config_change_bfre(pipe, 0);

	return pkt->handler->prepare(pkt, is_done);
}

static int usbhsf_dma_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);

	if (usbhs_get_dparam(priv, has_usb_dmac))
		return usbhsf_dma_prepare_pop_with_usb_dmac(pkt, is_done);
	else
		return usbhsf_dma_prepare_pop_with_rx_irq(pkt, is_done);
}

static int usbhsf_dma_try_pop_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len, ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_pop;

	/* get an enabled DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_pop;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	/* use PIO if packet is less than pio_dma_border */
	len = usbhsf_fifo_rcv_len(priv, fifo);
	len = min(pkt->length - pkt->actual, len);
	if (len & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_pop_unselect;

	if (len < usbhs_get_dparam(priv, pio_dma_border))
		goto usbhsf_pio_prepare_pop_unselect;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare
	 * enabled the irq to get here, but it is no longer
	 * needed for DMA. disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = len;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->try_run(pkt, is_done);
}

static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);

	BUG_ON(usbhs_get_dparam(priv, has_usb_dmac));

	return usbhsf_dma_try_pop_with_rx_irq(pkt, is_done);
}

static int usbhsf_dma_pop_done_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	pkt->actual += pkt->trans;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (pkt->trans < maxp)) {		/* short packet */
		*is_done = 1;
		usbhs_pipe_running(pipe, 0);
	} else {
		/* re-enable */
		usbhs_pipe_running(pipe, 0);
		usbhsf_prepare_pop(pkt, is_done);
	}

	return 0;
}

static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
					   struct dma_chan *chan, int dtln)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct dma_tx_state state;
	size_t received_size;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	dmaengine_tx_status(chan, pkt->cookie, &state);
	received_size = pkt->length - state.residue;

	if (dtln) {
		received_size -= USBHS_USB_DMAC_XFER_SIZE;
		received_size &= ~(maxp - 1);
		received_size += dtln;
	}

	return received_size;
}
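
/*
 * Note on the calculation above: the dmaengine residue only reflects what
 * the DMAC consumed, so when data remains in the FIFO (dtln != 0) the code
 * backs off one USBHS_USB_DMAC_XFER_SIZE chunk, rounds down to a
 * max-packet boundary, and then adds the DTLN byte count reported by the
 * FIFO to get the amount actually received.
 */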

static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
	int rcv_len;

	/*
	 * Since the driver disables rx_irq in DMA mode, the interrupt handler
	 * cannot clear the BRDYSTS. So, the function clears it here because
	 * the driver may use PIO mode next time.
	 */
	usbhs_xxxsts_clear(priv, BRDYSTS, usbhs_pipe_number(pipe));

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
	usbhsf_fifo_clear(pipe, fifo);
	pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);

	usbhsf_dma_stop(pipe, fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	/* The driver can assume the rx transaction is always "done" */
	*is_done = 1;

	return 0;
}

static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);

	if (usbhs_get_dparam(priv, has_usb_dmac))
		return usbhsf_dma_pop_done_with_usb_dmac(pkt, is_done);
	else
		return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
}

const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
	.prepare = usbhsf_dma_prepare_pop,
	.try_run = usbhsf_dma_try_pop,
	.dma_done = usbhsf_dma_pop_done
};

/*
 * DMA setting
 */
static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
{
	struct sh_dmae_slave *slave = param;

	/*
	 * FIXME
	 *
	 * usbhs doesn't recognize id = 0 as valid DMA
	 */
	if (0 == slave->shdma_slave.slave_id)
		return false;

	chan->private = slave;

	return true;
}

static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
{
	if (fifo->tx_chan)
		dma_release_channel(fifo->tx_chan);
	if (fifo->rx_chan)
		dma_release_channel(fifo->rx_chan);

	fifo->tx_chan = NULL;
	fifo->rx_chan = NULL;
}

static void usbhsf_dma_init_pdev(struct usbhs_fifo *fifo)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->tx_slave);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->rx_slave);
}

static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
			       int channel)
{
	char name[16];

	/*
	 * To avoid complex handling for DnFIFOs, the driver uses each
	 * DnFIFO as TX or RX direction (not bi-direction).
	 * So, the driver uses odd channels for TX, even channels for RX.
	 */
	snprintf(name, sizeof(name), "ch%d", channel);
	if (channel & 1) {
		fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
		if (IS_ERR(fifo->tx_chan))
			fifo->tx_chan = NULL;
	} else {
		fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
		if (IS_ERR(fifo->rx_chan))
			fifo->rx_chan = NULL;
	}
}

static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
			    int channel)
{
	struct device *dev = usbhs_priv_to_dev(priv);

	if (dev->of_node)
		usbhsf_dma_init_dt(dev, fifo, channel);
	else
		usbhsf_dma_init_pdev(fifo);

	if (fifo->tx_chan || fifo->rx_chan)
		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
			fifo->name,
			fifo->tx_chan ? "[TX]" : "    ",
			fifo->rx_chan ? "[RX]" : "    ");
}

/*
 * irq functions
 */
static int usbhsf_irq_empty(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->bempsts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->bempsts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
	}

	return 0;
}

static int usbhsf_irq_ready(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->brdysts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->brdysts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
	}

	return 0;
}

static void usbhsf_dma_complete(void *arg)
{
	struct usbhs_pipe *pipe = arg;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
	if (ret < 0)
		dev_err(dev, "dma_complete run_error %d : %d\n",
			usbhs_pipe_number(pipe), ret);
}

void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */

	/* clear DCP FIFO of transmission */
	if (usbhsf_fifo_select(pipe, fifo, 1) < 0)
		return;
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/* clear DCP FIFO of reception */
	if (usbhsf_fifo_select(pipe, fifo, 0) < 0)
		return;
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);
}

/*
 * fifo init
 */
void usbhs_fifo_init(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
	struct usbhs_fifo *dfifo;
	int i;

	mod->irq_empty		= usbhsf_irq_empty;
	mod->irq_ready		= usbhsf_irq_ready;
	mod->irq_bempsts	= 0;
	mod->irq_brdysts	= 0;

	cfifo->pipe = NULL;
	usbhs_for_each_dfifo(priv, dfifo, i)
		dfifo->pipe = NULL;
}

void usbhs_fifo_quit(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);

	mod->irq_empty		= NULL;
	mod->irq_ready		= NULL;
	mod->irq_bempsts	= 0;
	mod->irq_brdysts	= 0;
}

#define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port)		\
do {									\
	fifo = usbhsf_get_dnfifo(priv, channel);			\
	fifo->name	= "D"#channel"FIFO";				\
	fifo->port	= fifo_port;					\
	fifo->sel	= D##channel##FIFOSEL;				\
	fifo->ctr	= D##channel##FIFOCTR;				\
	fifo->tx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_tx_id);	\
	fifo->rx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_rx_id);	\
	usbhsf_dma_init(priv, fifo, channel);				\
} while (0)

#define USBHS_DFIFO_INIT(priv, fifo, channel)				\
	__USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
#define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel)			\
	__USBHS_DFIFO_INIT(priv, fifo, channel, 0)
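
/*
 * For illustration, USBHS_DFIFO_INIT(priv, fifo, 1) expands roughly to:
 *
 *	fifo = usbhsf_get_dnfifo(priv, 1);
 *	fifo->name	= "D1FIFO";
 *	fifo->port	= D1FIFO;
 *	fifo->sel	= D1FIFOSEL;
 *	fifo->ctr	= D1FIFOCTR;
 *	fifo->tx_slave.shdma_slave.slave_id =
 *			usbhs_get_dparam(priv, d1_tx_id);
 *	fifo->rx_slave.shdma_slave.slave_id =
 *			usbhs_get_dparam(priv, d1_rx_id);
 *	usbhsf_dma_init(priv, fifo, 1);
 */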

int usbhs_fifo_probe(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;

	/* CFIFO */
	fifo = usbhsf_get_cfifo(priv);
	fifo->name	= "CFIFO";
	fifo->port	= CFIFO;
	fifo->sel	= CFIFOSEL;
	fifo->ctr	= CFIFOCTR;

	/* DFIFO */
	USBHS_DFIFO_INIT(priv, fifo, 0);
	USBHS_DFIFO_INIT(priv, fifo, 1);
	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 2);
	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 3);

	return 0;
}

void usbhs_fifo_remove(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;
	int i;

	usbhs_for_each_dfifo(priv, fifo, i)
		usbhsf_dma_quit(priv, fifo);
}