// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2012-2015, 2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"
/* Bookkeeping entry for one PMC data buffer obtained from
 * dma_alloc_coherent(): the device-visible DMA address and the matching
 * CPU virtual address needed later for dma_free_coherent().
 */
struct desc_alloc_info {
	dma_addr_t pa;	/* DMA (bus) address of the buffer */
	void	  *va;	/* CPU virtual address of the buffer */
};
static int wil_is_pmc_allocated ( struct pmc_ctx * pmc )
{
return ! ! pmc - > pring_va ;
}
void wil_pmc_init ( struct wil6210_priv * wil )
{
memset ( & wil - > pmc , 0 , sizeof ( struct pmc_ctx ) ) ;
mutex_init ( & wil - > pmc . lock ) ;
}
/* Allocate the physical ring (p-ring) and the required
 * number of descriptors of required size.
 * Initialize the descriptors as required by pmc dma.
 * The descriptors' buffers dwords are initialized to hold
 * dword's serial number in the lsw and reserved value
 * PCM_DATA_INVALID_DW_VAL in the msw.
 *
 * On any failure all partially-acquired resources are released via the
 * goto-cleanup chain and pmc->last_cmd_status is set to a negative errno
 * (readable afterwards via wil_pmc_last_cmd_status()).
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}
	/* validate sizes before they are used in multiplications below */
	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	/* guard num_descriptors * descriptor_size against int overflow */
	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context*/
	pmc->descriptors = kcalloc(num_descriptors,
				   sizeof(struct desc_alloc_info),
				   GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc skb list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	/* restore the full DMA mask for all subsequent allocations */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	wil_dbg_misc(wil,
		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     pmc->pring_va, &pmc->pring_pa,
		     sizeof(struct vring_tx_desc),
		     num_descriptors,
		     sizeof(struct vring_tx_desc) * num_descriptors);

	if (!pmc->pring_va) {
		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_skb_list;
	}

	/* initially, all descriptors are SW owned
	 * For Tx, Rx, and PMC, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
			descriptor_size,
			&pmc->descriptors[i].pa,
			GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d", i);
			goto release_pmc_skbs;
		}

		/* stamp each dword: serial number in lsw, reserved marker
		 * PCM_DATA_INVALID_DW_VAL in msw, so unwritten data is
		 * recognizable when read back
		 */
		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		/* NOTE(review): BIT(9) is an undocumented flag here —
		 * confirm meaning against the descriptor layout in txrx.h
		 */
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
		*_d = *d; /* publish the staged descriptor into the ring */
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	/* hand the ring over to firmware */
	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					vif->mid,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);
	return;

release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing skbs...\n");
	/* descriptors were filled in order, so stop at the first NULL va */
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);
		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");
	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);
	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}
/* Traverse the p-ring and release all buffers.
 * At the end release the p-ring memory
 *
 * @send_pmc_cmd: when non-zero, first notify firmware with a
 * WMI_PMC_RELEASE command; memory is freed regardless of the command's
 * outcome. Result is recorded in pmc->last_cmd_status.
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		/* tell firmware to stop using the ring before freeing it */
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
			wmi_send(wil, WMI_PMC_CMDID, vif->mid,
				 &pmc_cmd, sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		/* NULL va marks the context as "not allocated" again */
		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		/* alloc filled descriptors contiguously, so the first NULL
		 * va marks the end of the allocated range
		 */
		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}
2020-08-27 08:37:18 +01:00
/* Status of the last operation requested via debugfs: alloc/free/read.
2015-04-30 16:25:09 +03:00
* 0 - success or negative errno
*/
int wil_pmc_last_cmd_status ( struct wil6210_priv * wil )
{
2017-01-20 13:49:46 +02:00
wil_dbg_misc ( wil , " pmc_last_cmd_status: status %d \n " ,
2015-04-30 16:25:09 +03:00
wil - > pmc . last_cmd_status ) ;
return wil - > pmc . last_cmd_status ;
}
/* Read from required position up to the end of current descriptor,
 * depends on descriptor size configured during alloc request.
 *
 * debugfs read handler: *f_pos addresses the PMC data as one flat
 * buffer of num_descriptors * descriptor_size bytes; a single call
 * copies at most up to the end of the descriptor containing *f_pos.
 *
 * NOTE(review): retval is size_t, but simple_read_from_buffer() can
 * return a negative errno; the value round-trips through unsigned back
 * to a negative ssize_t on return — confirm this is intended.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t retval = 0;
	unsigned long long idx;
	loff_t offset;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	wil_dbg_misc(wil,
		     "pmc_read: size %u, pos %lld\n",
		     (u32)count, *f_pos);

	pmc->last_cmd_status = 0;

	/* split *f_pos into descriptor index and byte offset within it;
	 * do_div is used because *f_pos is 64-bit
	 */
	idx = *f_pos;
	do_div(idx, pmc->descriptor_size);
	offset = *f_pos - (idx * pmc->descriptor_size);

	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
			     *f_pos, (u32)pmc_size);
		pmc->last_cmd_status = -ERANGE;
		goto out;
	}

	wil_dbg_misc(wil,
		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
		     *f_pos, idx, offset, count);

	/* if no errors, return the copied byte count */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	*f_pos += retval;
out:
	mutex_unlock(&pmc->lock);

	return retval;
}
/* debugfs llseek handler for the PMC data file.
 *
 * Repositions filp->f_pos within the flat PMC buffer of
 * descriptor_size * num_descriptors bytes. Positions are clamped to
 * the buffer size; a negative resulting position yields -EINVAL.
 * Returns the new position, or a negative errno.
 */
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
	loff_t newpos;
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	/* use the standard SEEK_* constants instead of magic 0/1/2 */
	switch (whence) {
	case SEEK_SET:
		newpos = off;
		break;

	case SEEK_CUR:
		newpos = filp->f_pos + off;
		break;

	case SEEK_END:
		/* off is deliberately ignored: seek straight to the end */
		newpos = pmc_size;
		break;

	default: /* can't happen */
		newpos = -EINVAL;
		goto out;
	}

	if (newpos < 0) {
		newpos = -EINVAL;
		goto out;
	}
	if (newpos > pmc_size)
		newpos = pmc_size;

	filp->f_pos = newpos;

out:
	mutex_unlock(&pmc->lock);

	return newpos;
}
/* debugfs dump of the raw PMC p-ring descriptors into a seq_file.
 *
 * NOTE(review): size is computed from sizeof(struct vring_rx_desc)
 * while the ring was allocated with sizeof(struct vring_tx_desc) —
 * presumably the two are the same size; confirm against txrx.h.
 */
int wil_pmcring_read(struct seq_file *s, void *data)
{
	struct wil6210_priv *wil = s->private;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_ring_size =
		sizeof(struct vring_rx_desc) * pmc->num_descriptors;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);

	seq_write(s, pmc->pring_va, pmc_ring_size);

	mutex_unlock(&pmc->lock);

	return 0;
}