// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/gfp.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>

#include "mlx5_ib.h"
#include "qp.h"

static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
			       struct mlx5_core_dct *dct);

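/*
 * Look up a tracked QP/RQ/SQ resource by its rsn (qpn | type) and take a
 * reference on it. The caller must drop the reference with
 * mlx5_core_put_rsc(); the final put completes ->free so that
 * destroy_resource_common() can finish.
 */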
static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
	struct mlx5_core_rsc_common *common;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		refcount_inc(&common->refcount);

	spin_unlock_irqrestore(&table->lock, flags);

	return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (refcount_dec_and_test(&common->refcount))
		complete(&common->free);
}

static u64 qp_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

	return mask;
}

static u64 rq_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

	return mask;
}

static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static u64 dct_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
}

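/*
 * Per-resource-type masks of async events we are willing to forward; any
 * other event arriving for that resource type is dropped, and an unknown
 * resource type triggers a warning.
 */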
static bool is_event_type_allowed(int rsc_type, int event_type)
{
	switch (rsc_type) {
	case MLX5_EVENT_QUEUE_TYPE_QP:
		return BIT(event_type) & qp_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_RQ:
		return BIT(event_type) & rq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_SQ:
		return BIT(event_type) & sq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_DCT:
		return BIT(event_type) & dct_allowed_event_types();
	default:
		WARN(1, "Event arrived for unknown resource type");
		return false;
	}
}

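/*
 * A DCT_DRAINED EQE carries the DCT number; complete ->drained so that
 * mlx5_core_destroy_dct() can stop waiting and issue DESTROY_DCT.
 */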
static int dct_event_notifier(struct mlx5_ib_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_core_dct *dct;
	unsigned long flags;
	u32 qpn;

	qpn = be32_to_cpu(eqe->data.dct.dctn) & 0xFFFFFF;
	xa_lock_irqsave(&dev->qp_table.dct_xa, flags);
	dct = xa_load(&dev->qp_table.dct_xa, qpn);
	if (dct)
		complete(&dct->drained);
	xa_unlock_irqrestore(&dev->qp_table.dct_xa, flags);
	return NOTIFY_OK;
}

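/*
 * Notifier registered on the mlx5_core events chain (see mlx5_init_qp_table).
 * DCT drain completions are handled directly; QP/RQ/SQ events are looked up
 * by rsn and dispatched to the resource's ->event() callback, which is then
 * responsible for putting the reference taken here.
 */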
static int rsc_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_ib_dev *dev =
		container_of(nb, struct mlx5_ib_dev, qp_table.nb);
	struct mlx5_core_rsc_common *common;
	struct mlx5_eqe *eqe = data;
	u8 event_type = (u8)type;
	struct mlx5_core_qp *qp;
	u32 rsn;

	switch (event_type) {
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		return dct_event_notifier(dev, eqe);
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
		break;
	default:
		return NOTIFY_DONE;
	}

	common = mlx5_get_rsc(&dev->qp_table, rsn);
	if (!common)
		return NOTIFY_OK;

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
		goto out;

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		/* Need to put resource in event handler */
		return NOTIFY_OK;
	default:
		break;
	}
out:
	mlx5_core_put_rsc(common);

	return NOTIFY_OK;
}

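/*
 * Track a QP/RQ/SQ in the radix tree, keyed by qpn | (rsc_type <<
 * MLX5_USER_INDEX_LEN), with an initial reference. The matching
 * destroy_resource_common() removes the entry, drops that reference and
 * waits until every outstanding holder has called mlx5_core_put_rsc().
 */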
static int create_resource_common(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *qp, int rsc_type)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	refcount_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}

static void destroy_resource_common(struct mlx5_ib_dev *dev,
				    struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}

static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
				  struct mlx5_core_dct *dct)
{
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
	MLX5_SET(destroy_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
}

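/*
 * Create a DCT in firmware and track it in dct_xa so drain events can find
 * it; if tracking fails, the firmware object is destroyed before returning.
 */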
int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
			 u32 *in, int inlen, u32 *out, int outlen)
{
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
	/*
	 * Use mlx5_cmd_do() rather than mlx5_cmd_exec() so the FW/driver
	 * status and the command outbox (status/syndrome) are returned as is;
	 * DEVX/rdma callers need that visibility, while kernel callers that
	 * only care about success/failure go through mlx5_cmd_check().
	 */
	err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen);
	if (err)
		return err;

	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
	qp->uid = MLX5_GET(create_dct_in, in, uid);
	err = xa_err(xa_store_irq(&dev->qp_table.dct_xa, qp->qpn, dct, GFP_KERNEL));
	if (err)
		goto err_cmd;

	return 0;
err_cmd:
	_mlx5_core_destroy_dct(dev, dct);
	return err;
}

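/*
 * Issue CREATE_QP, record the returned qpn and the creating uid, and register
 * the QP in the resource table so async events can be dispatched to it. If
 * tracking fails the QP is destroyed in firmware before returning.
 */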
int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *in, int inlen, u32 *out)
{
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
			    MLX5_ST_SZ_BYTES(create_qp_out));
	if (err)
		return err;

	qp->uid = MLX5_GET(create_qp_in, in, uid);
	qp->qpn = MLX5_GET(create_qp_out, out, qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	mlx5_debug_qp_add(dev->mdev, qp);

	return 0;

err_cmd:
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
	return err;
}

static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
			       struct mlx5_core_dct *dct)
{
	u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
	MLX5_SET(drain_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
}

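/*
 * Teardown is drain-then-destroy: DRAIN_DCT, wait for the DCT_DRAINED event
 * (skipped when the device is in internal error), then swap the dct_xa entry
 * for XA_ZERO_ENTRY while DESTROY_DCT runs so the event handler can no longer
 * find the DCT, and finally erase the entry (or restore it on failure).
 */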
int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
			  struct mlx5_core_dct *dct)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	struct mlx5_core_dct *tmp;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
			goto destroy;

		return err;
	}
	wait_for_completion(&dct->drained);

destroy:
	tmp = xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, dct, XA_ZERO_ENTRY, GFP_KERNEL);
	if (WARN_ON(tmp != dct))
		return xa_err(tmp) ?: -EINVAL;

	err = _mlx5_core_destroy_dct(dev, dct);
	if (err) {
		xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, XA_ZERO_ENTRY, dct, 0);
		return err;
	}
	xa_erase_irq(&table->dct_xa, dct->mqp.qpn);
	return 0;
}

int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	mlx5_debug_qp_remove(dev->mdev, qp);

	destroy_resource_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
}

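/*
 * timeout_usec is given in microseconds; the command field appears to take
 * units of 100 usec (hence the division below), so sub-100 usec values are
 * rounded down.
 */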
int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
			     u32 timeout_usec)
{
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);
	return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
}

struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
	mbox->inlen = inlen;
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	if (!mbox->in || !mbox->out) {
		kfree(mbox->in);
		kfree(mbox->out);
		return -ENOMEM;
	}

	return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}

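/*
 * Extract the ECE (enhanced connection establishment) value from the command
 * outbox; only the modify opcodes that carry ECE are handled, everything else
 * yields 0.
 */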
static int get_ece_from_mbox(void *out, u16 opcode)
{
	int ece = 0;

	switch (opcode) {
	case MLX5_CMD_OP_INIT2INIT_QP:
		ece = MLX5_GET(init2init_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		ece = MLX5_GET(init2rtr_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		ece = MLX5_GET(rtr2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		ece = MLX5_GET(rts2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		ece = MLX5_GET(rst2init_qp_out, out, ece);
		break;
	default:
		break;
	}

	return ece;
}

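/*
 * Allocate and fill the command mailbox for a MODIFY_QP-family opcode. The
 * plain transitions (2RST/2ERR) only need opcode/qpn/uid; the QPC-carrying
 * transitions also copy the qpc and opt_param_mask, and some of them carry
 * the caller's ECE value.
 */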
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox, u16 uid, u32 ece)
{
	mbox->out = NULL;
	mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ)                                                  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid)                            \
	do {                                                                   \
		MLX5_SET(typ##_in, in, opcode, _opcode);                       \
		MLX5_SET(typ##_in, in, qpn, _qpn);                             \
		MLX5_SET(typ##_in, in, uid, _uid);                             \
	} while (0)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid)          \
	do {                                                                   \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid);                   \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p);                \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc,                  \
		       MLX5_ST_SZ_BYTES(qpc));                                 \
	} while (0)

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_SQD_RTS_QP:
		if (MBOX_ALLOC(mbox, sqd2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqd2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

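/*
 * Illustrative sketch only (not code from this file): a caller moving a QP
 * from RESET to INIT would do something like
 *
 *	u32 ece = 0;
 *
 *	err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_RST2INIT_QP,
 *				  opt_param_mask, qpc, mqp, &ece);
 *
 * where opt_param_mask, qpc and mqp (a struct mlx5_core_qp *) are the
 * caller's own state.
 */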
int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
			void *qpc, struct mlx5_core_qp *qp, u32 *ece)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
				   qpc, &mbox, qp->uid, (ece) ? *ece : 0);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
			    mbox.outlen);

	if (ece)
		*ece = get_ece_from_mbox(mbox.out, opcode);

	mbox_free(&mbox);
	return err;
}

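/*
 * Set up the per-device QP resource table: radix tree for QPs/RQs/SQs, xarray
 * for DCTs, debugfs entries, and the notifier that feeds rsc_event_notifier().
 */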
int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	xa_init(&table->dct_xa);
	mlx5_qp_debugfs_init(dev->mdev);

	table->nb.notifier_call = rsc_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}

void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
	mlx5_qp_debugfs_cleanup(dev->mdev);
}

int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen, bool qpc_ext)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	MLX5_SET(query_qp_in, in, qpc_ext, qpc_ext);

	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
}

int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
	MLX5_SET(query_dct_in, in, dctn, qp->qpn);

	return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
			     outlen);
}

int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}

int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
}

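/*
 * The "tracked" RQ/SQ helpers below mirror the QP flow: create the object in
 * firmware, then register it in the resource table so RQ/SQ async events can
 * be delivered through rsc_event_notifier().
 */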
static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};

	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
	MLX5_SET(destroy_rq_in, in, rqn, rqn);
	MLX5_SET(destroy_rq_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
}

int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
	if (err)
		return err;

	rq->uid = MLX5_GET(create_rq_in, in, uid);
	rq->qpn = rqn;
	err = create_resource_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	destroy_rq_tracked(dev, rq->qpn, rq->uid);

	return err;
}

int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
				 struct mlx5_core_qp *rq)
{
	destroy_resource_common(dev, rq);
	return destroy_rq_tracked(dev, rq->qpn, rq->uid);
}

static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};

	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
	MLX5_SET(destroy_sq_in, in, sqn, sqn);
	MLX5_SET(destroy_sq_in, in, uid, uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
}

int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
	int err;

	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	sq->qpn = MLX5_GET(create_sq_out, out, sqn);
	sq->uid = MLX5_GET(create_sq_in, in, uid);
	err = create_resource_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	destroy_sq_tracked(dev, sq->qpn, sq->uid);

	return err;
}

void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_resource_common(dev, sq);
	destroy_sq_tracked(dev, sq->qpn, sq->uid);
}

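/*
 * Hold/put wrappers used elsewhere in the driver (for example by the ODP
 * page-fault path) to pin a tracked resource across an operation. Purely
 * illustrative usage, with wq_num being a caller-provided number:
 *
 *	common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
 *	if (common) {
 *		... use the resource ...
 *		mlx5_core_res_put(common);
 *	}
 */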
struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
						int res_num,
						enum mlx5_res_type res_type)
{
	u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
	struct mlx5_qp_table *table = &dev->qp_table;

	return mlx5_get_rsc(table, rsn);
}

void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	mlx5_core_put_rsc(res);
}