/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu
 */

static void uhci_free_pending_tds(struct uhci_hcd *uhci);

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem.  The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt.  We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases.  I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
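
/*
 * Allocate a TD from the DMA pool and initialize its list heads.
 * td->frame == -1 marks a TD that isn't linked into the frame list.
 */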
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->remove_list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}
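
/*
 * Return a TD to the DMA pool, warning if it is still linked into any
 * of the driver's lists.
 */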
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
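
/* Fill in a TD's hardware fields, converting each word to little-endian */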
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next,
					struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
	wmb();
}
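
/*
 * Allocate and initialize a QH.  With a non-NULL udev this is a normal
 * QH tied to an endpoint (it gets a dummy TD and is stored in
 * hep->hcpriv); with a NULL udev it is a skeleton QH for the schedule.
 */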
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->dummy_td = uhci_alloc_td(uhci);
		if (!qh->dummy_td) {
			dma_pool_free(uhci->qh_pool, qh, dma_handle);
			return NULL;
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}
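
/*
 * Free a QH, releasing its dummy TD and clearing the endpoint's hcpriv
 * pointer if the QH belongs to a device endpoint.
 */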
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When the currently executing URB is dequeued, save its current toggle value
 */
static void uhci_save_toggle(struct uhci_qh *qh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM ||
			qh->queue.next != &urbp->node)
		return;
	qh->element = UHCI_PTR_TERM;

	/* Only bulk and interrupt pipes have to worry about toggles */
	if (!(qh->type == USB_ENDPOINT_XFER_BULK ||
			qh->type == USB_ENDPOINT_XFER_INT))
		return;

	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB; the next URB's
		 * toggle follows from this URB's last TD */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = cpu_to_le32(td->dma_handle);
	}

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the end of the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &qh->skel->node);

	/* Link it into the schedule */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}
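
/*
 * Allocate per-URB private data and attach it to the URB via urb->hcpriv.
 */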
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *) urbp, 0, sizeof(*urbp));

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}
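
/* Helpers for adding and removing TDs on an URB's td_list */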
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urb(struct uhci_td *td)
{
	if (list_empty(&td->list))
		return;

	list_del_init(&td->list);
}
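
/*
 * Release an URB's private data.  The URB's TDs are moved to the
 * td_remove_list and freed later, once the controller is known to be
 * done with them.
 */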
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TDs. */
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
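
/*
 * Full-Speed Bandwidth Reclamation (FSBR): while it is enabled, the
 * terminating skeleton QH is linked back to the full-speed control
 * queue, so the controller keeps polling during otherwise-idle bus
 * time.  Disabling is deferred via uhci->fsbrtimeout (see check_fsbr).
 */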
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(
				uhci->skel_fs_control_qh->dma_handle) |
				UHCI_PTR_QH;
	}
}

static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	WARN_ON(status & TD_CTRL_ACTIVE);		/* Active */
	return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		qh->skel = uhci->skel_ls_control_qh;
	else {
		qh->skel = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urb(qh->dummy_td);
	return -ENOMEM;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = cpu_to_le32(td->dma_handle);
		}
		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urb(qh->dummy_td);
	return -ENOMEM;
}

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	qh->skel = uhci->skel_bulk_qh;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_inc_fsbr(uhci, urb);
	return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */
	qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)];
	return uhci_submit_common(uhci, urb, qh);
}

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = cpu_to_le32(td->dma_handle);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urb(td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(uhci_dev(uhci),
						"%s: failed with status %x\n",
						__FUNCTION__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		} else if (len < uhci_expected_length(td_token(td))) {

			/* We received a short packet */
			if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;
			else if (ctrlstat & TD_CTRL_SPD)
				ret = 1;
		}

		uhci_remove_td_from_urb(td);
		if (qh->post_td)
			list_add(&qh->post_td->remove_list,
					&uhci->td_remove_list);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* In case a control transfer gets an error
		 * during the setup stage */
		urb->actual_length = max(urb->actual_length, 0);

		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Figure out the starting frame number */
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&qh->queue)) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10);

		} else {		/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			urb->start_frame = (last_urb->start_frame +
					last_urb->number_of_packets *
					last_urb->interval);
		}
	} else {
		/* FIXME: Sanity check */
	}
	urb->start_frame &= (UHCI_NUMFRAMES - 1);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	qh->skel = uhci->skel_iso_qh;

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += urb->interval;
	}

	return 0;
}
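
/*
 * Collect the results of a completed Isochronous URB: the per-frame
 * actual lengths and status codes, plus the overall error count.
 */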
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = urb->error_count = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}
	return ret;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto done;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (hep->hcpriv)
		qh = (struct uhci_qh *) hep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		if (list_empty(&qh->queue)) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, qh);
				if (ret == 0)
					usb_claim_bandwidth(urb->dev, urb,
							bustime, 0);
			}
		} else {	/* inherit from parent */
			struct urb_priv *eurbp;

			eurbp = list_entry(qh->queue.prev, struct urb_priv,
					node);
			urb->bandwidth = eurbp->urb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, qh);
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb, qh);
		if (ret == 0)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped)
		uhci_activate_qh(uhci, qh);
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */

err_no_qh:
	uhci_free_urb_priv(uhci, urbp);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}
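
/*
 * Dequeue an URB.  This only unlinks the QH (and, for Isochronous URBs,
 * the TDs in the frame list); the URB itself is given back later from
 * uhci_scan_qh(), once the controller is done with it.
 */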
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (urbp->qh->type == USB_ENDPOINT_XFER_ISOC)
		uhci_unlink_isochronous_tds(uhci, urb);
	uhci_unlink_qh(uhci, urbp->qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Isochronous TDs get unlinked directly from the frame list */
	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		uhci_unlink_isochronous_tds(uhci, urb);

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB. */
	else if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd, *ltd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		ltd = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = ltd->link;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
	uhci_free_urb_priv(uhci, urbp);

	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		break;
	case USB_ENDPOINT_XFER_INT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&qh->queue) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		break;
	}

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);

		/* Bandwidth stuff not yet implemented */
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct pt_regs *regs)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)	/* Not dequeued */
			urb->status = status;
		else
			status = ECONNRESET;		/* Not -ECONNRESET */
		spin_unlock(&urb->lock);

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (status == ECONNRESET) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, regs);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->status != -EINPROGRESS) {
			uhci_save_toggle(qh, urb);
			uhci_giveback_urb(uhci, qh, urb, regs);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);
		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}
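
/* Free the TDs on the removal list, now that the controller is done with them */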
static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
	struct uhci_td *td, *tmp;

	list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
		list_del_init(&td->remove_list);
		uhci_free_td(uhci, td);
	}
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);
			uhci_scan_qh(uhci, qh, regs);
		}
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped)
		uhci_free_pending_tds(uhci);

	if (list_empty(&uhci->td_remove_list) &&
			list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}
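
/*
 * Turn FSBR off for real once the timeout set by uhci_dec_fsbr() has
 * expired and no URBs are using it.
 */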
static void check_fsbr(struct uhci_hcd *uhci)
{
	/* For now, don't scan URBs for FSBR timeouts.
	 * Add it back in later... */

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout &&
			time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}
}