// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
*
 * Copyright (C) 2017 Intel Corporation
*
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
*/
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg && ring->first_seg->trbs) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}
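
/*
 * The DbC Info Context stores the four string descriptor lengths packed
 * into a single dword: string0 in bits 7:0, manufacturer in 15:8, product
 * in 23:16 and serial in 31:24 (per the xhci 7.6.9 info context layout).
 * The helper below builds that dword by shifting each bLength in as it
 * goes, starting with the serial string so it ends up in the top byte.
 */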
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0: */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;
	string_length += 4;

	return string_length;
}
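
/*
 * Program the DbC Context: an info context pointing at the string table
 * built above, followed by one 64-byte endpoint context for each of the
 * bulk-out and bulk-in endpoints.
 */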
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() converts to bus byte order, so no cpu_to_le32() here */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}
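
/*
 * Complete a request and hand it back to its owner. The completion
 * callback runs without dbc->lock held (it may queue further requests),
 * as the __releases/__acquires annotations document.
 */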
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb *trb = req->trb;

	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request *req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	if (!dbc)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}
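
/*
 * A minimal usage sketch for the request API (illustrative only; roughly
 * what a DbC function driver such as xhci-dbgtty does; the buffer and the
 * my_complete() callback below are hypothetical):
 *
 *	req = dbc_alloc_request(dbc, BULK_IN, GFP_KERNEL);
 *	if (req) {
 *		req->buf = buffer;		// caller-owned, DMA-able
 *		req->length = size;
 *		req->complete = my_complete;	// called on completion
 *		if (dbc_ep_queue(req))
 *			dbc_free_request(req);
 *	}
 */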

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}
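
/*
 * Enqueue one TRB on a single-segment DbC transfer ring. When the enqueue
 * pointer lands on the link TRB at the end of the segment, hand the link
 * TRB over by flipping its cycle bit, wrap back to the start of the
 * segment, and toggle the ring's cycle state.
 */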
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
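
/*
 * Post a bulk transfer TRB. The TRB is first written with its cycle bit
 * in the "owned by software" state; only after the write barrier is the
 * cycle bit flipped, so the controller never sees a half-written TRB.
 * Finally the doorbell is rung for the endpoint's direction.
 */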
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}
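
/*
 * Map the request buffer for DMA and put the request on the endpoint's
 * transfer ring and pending list. Called with dbc->lock held by
 * dbc_ep_queue().
 */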
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long flags;
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep;

	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}
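
/*
 * The DbC event ring is a single segment, so a one-entry event ring
 * segment table is sufficient.
 */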
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}
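
/*
 * Allocate a bare single-segment ring without going through the xhci ring
 * allocator. For transfer rings, the trailing link TRB points back at the
 * same segment with LINK_TOGGLE set, so the cycle state flips on each trip
 * around the ring.
 */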
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}

static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}
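
/*
 * Enable DbC: clear the control register and wait for the controller to
 * report the enable bit as clear before setting up DbC memory, then set
 * DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE and wait for the enable bit
 * to latch. Called with dbc->lock held, hence the GFP_ATOMIC allocation.
 */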
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -1;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}
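
/*
 * Start DbC: a runtime PM reference is taken on the controller for as long
 * as DbC is enabled, and dropped again by xhci_dbc_stop(). Event handling
 * is kicked off by scheduling the polling work.
 */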
static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
	case DS_STALLED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(dbc);
		pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* Port reset change bit will be cleared in other place: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ring = dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}
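
/*
 * Poll-driven event handling: this driver wires up no interrupt for DbC,
 * so this function is run periodically from event_work. It first walks
 * the DbC state machine using the PORTSC and CTRL registers, then drains
 * the event ring and writes the new dequeue pointer to ERDP.
 */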
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	struct dbc_ep *dep;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}
		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}
		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);
			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);
			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32 reg;
	struct xhci_dbc *dbc;
	unsigned long flags;
	void __iomem *base;
	int dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* We will avoid using DbC in xhci driver if it's in use. */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}
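
/*
 * sysfs "dbc" attribute on the host controller device: reading it returns
 * the current DbC state, writing "enable" or "disable" starts or stops
 * DbC. For example (the PCI path is illustrative):
 *
 *	echo enable > /sys/bus/pci/devices/0000:00:14.0/dbc
 */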
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char *p;
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	case DS_STALLED:
		p = "stalled";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd *xhci;
	struct xhci_dbc *dbc;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(dbc);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_probe(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_remove(xhci->dbc);
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_remove(xhci->dbc);
	xhci_dbc_stop(xhci->dbc);
	xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}

	return ret;
}
#endif /* CONFIG_PM */