// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport_internal.h"

#define ADF_ARB_NUM 4
#define ADF_ARB_REG_SIZE 0x4
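
/*
 * The service arbiter configuration (SARCONFIG) and the worker-thread-
 * to-arbiter mapping (WT2SAM) registers are arrays of 32-bit CSRs; each
 * macro below writes entry 'index' of the array starting at the
 * device-specific offset reported by hw_data->get_arb_info(). For
 * example, WRITE_CSR_ARB_SARCONFIG(csr, arb_off, 2, cfg) writes 'cfg'
 * at arb_off + 0x8 from the bank 0 register base.
 */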
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, arb_offset, index, value) \
	ADF_CSR_WR(csr_addr, (arb_offset) + \
	(ADF_ARB_REG_SIZE * (index)), value)

#define WRITE_CSR_ARB_WT2SAM(csr_addr, arb_offset, wt_offset, index, value) \
	ADF_CSR_WR(csr_addr, ((arb_offset) + (wt_offset)) + \
	(ADF_ARB_REG_SIZE * (index)), value)

int adf_init_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
	unsigned long ae_mask = hw_data->ae_mask;
	u32 arb_off, wt_off, arb_cfg;
	const u32 *thd_2_arb_cfg;
	struct arb_info info;
	int arb, i;
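
	/* Fetch the device-specific arbiter CSR layout */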
	hw_data->get_arb_info(&info);
	arb_cfg = info.arb_cfg;
	arb_off = info.arb_offset;
	wt_off = info.wt2sam_offset;

	/*
	 * Service arbiter configured for 32-byte responses and
	 * ring flow control check enabled.
	 */
	for (arb = 0; arb < ADF_ARB_NUM; arb++)
		WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);

	/* Map worker threads to service arbiters */
	thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);

	for_each_set_bit(i, &ae_mask, hw_data->num_engines)
		WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);

	return 0;
}
EXPORT_SYMBOL_GPL(adf_init_arb);

void adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
	struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u32 tx_ring_mask = hw_data->tx_rings_mask;
	u32 shift = hw_data->tx_rx_gap;
	u32 arben, arben_tx, arben_rx;
	u32 rx_ring_mask;

	/*
	 * Enable arbitration on a ring only if the TX half of the ring mask
	 * matches the RX half. This results in CSR writes on both the TX
	 * and the RX update - only one is necessary, but both are done for
	 * simplicity.
	 */
	rx_ring_mask = tx_ring_mask << shift;
	arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
	arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
	arben = arben_tx & arben_rx;
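
	/*
	 * Worked example with hypothetical GEN2-style values: with
	 * tx_rings_mask = 0xff and tx_rx_gap = 8, rx_ring_mask is 0xff00.
	 * A bank with ring_mask = 0x0101 (TX ring 0 and its RX peer,
	 * ring 8, enabled) yields arben_tx = 0x01 and arben_rx = 0x01,
	 * so arben = 0x01: arbitration is enabled only for ring pair 0.
	 */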

	csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
					   ring->bank->bank_number, arben);
}

void adf_exit_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u32 arb_off, wt_off;
	struct arb_info info;
	void __iomem *csr;
	unsigned int i;

	hw_data->get_arb_info(&info);
	arb_off = info.arb_offset;
	wt_off = info.wt2sam_offset;
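
	/* Nothing to clean up if the transport layer was never set up */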
	if (!accel_dev->transport)
		return;

	csr = accel_dev->transport->banks[0].csr_addr;

	/* Reset arbiter configuration */
	for (i = 0; i < ADF_ARB_NUM; i++)
		WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);

	/* Unmap worker threads from service arbiters */
	for (i = 0; i < hw_data->num_engines; i++)
		WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);

	/* Disable arbitration on all rings */
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
		csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}
EXPORT_SYMBOL_GPL(adf_exit_arb);