// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
/*
 * Translate the user-facing flow table type into the corresponding mlx5
 * flow steering namespace. Returns 0 on success, -EINVAL for any table
 * type this file does not support.
 */
static int
mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
			     enum mlx5_flow_namespace_type *namespace)
{
	switch (table_type) {
	case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
		*namespace = MLX5_FLOW_NAMESPACE_BYPASS;
		return 0;
	case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
		*namespace = MLX5_FLOW_NAMESPACE_EGRESS;
		return 0;
	default:
		return -EINVAL;
	}
}
2018-07-23 15:25:07 +03:00
static const struct uverbs_attr_spec mlx5_ib_flow_type [ ] = {
[ MLX5_IB_FLOW_TYPE_NORMAL ] = {
. type = UVERBS_ATTR_TYPE_PTR_IN ,
. u . ptr = {
. len = sizeof ( u16 ) , /* data is priority */
. min_len = sizeof ( u16 ) ,
}
} ,
[ MLX5_IB_FLOW_TYPE_SNIFFER ] = {
. type = UVERBS_ATTR_TYPE_PTR_IN ,
UVERBS_ATTR_NO_DATA ( ) ,
} ,
[ MLX5_IB_FLOW_TYPE_ALL_DEFAULT ] = {
. type = UVERBS_ATTR_TYPE_PTR_IN ,
UVERBS_ATTR_NO_DATA ( ) ,
} ,
[ MLX5_IB_FLOW_TYPE_MC_DEFAULT ] = {
. type = UVERBS_ATTR_TYPE_PTR_IN ,
UVERBS_ATTR_NO_DATA ( ) ,
} ,
} ;
/* Upper bound on flow actions accepted per created flow. */
#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
static int UVERBS_HANDLER ( MLX5_IB_METHOD_CREATE_FLOW ) (
2018-07-25 21:40:18 -06:00
struct ib_uverbs_file * file , struct uverbs_attr_bundle * attrs )
2018-07-23 15:25:09 +03:00
{
2018-09-06 17:27:05 +03:00
struct mlx5_flow_act flow_act = { . flow_tag = MLX5_FS_DEFAULT_FLOW_TAG } ;
2018-07-23 15:25:09 +03:00
struct mlx5_ib_flow_handler * flow_handler ;
struct mlx5_ib_flow_matcher * fs_matcher ;
2018-09-06 17:27:06 +03:00
struct ib_uobject * * arr_flow_actions ;
struct ib_uflow_resources * uflow_res ;
2018-07-23 15:25:09 +03:00
void * devx_obj ;
int dest_id , dest_type ;
void * cmd_in ;
int inlen ;
bool dest_devx , dest_qp ;
struct ib_qp * qp = NULL ;
struct ib_uobject * uobj =
uverbs_attr_get_uobject ( attrs , MLX5_IB_ATTR_CREATE_FLOW_HANDLE ) ;
struct mlx5_ib_dev * dev = to_mdev ( uobj - > context - > device ) ;
2018-09-06 17:27:06 +03:00
int len , ret , i ;
2018-07-23 15:25:09 +03:00
if ( ! capable ( CAP_NET_RAW ) )
return - EPERM ;
dest_devx =
uverbs_attr_is_valid ( attrs , MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX ) ;
dest_qp = uverbs_attr_is_valid ( attrs ,
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP ) ;
2018-09-06 17:27:08 +03:00
fs_matcher = uverbs_attr_get_obj ( attrs ,
MLX5_IB_ATTR_CREATE_FLOW_MATCHER ) ;
if ( fs_matcher - > ns_type = = MLX5_FLOW_NAMESPACE_BYPASS & &
( ( dest_devx & & dest_qp ) | | ( ! dest_devx & & ! dest_qp ) ) )
return - EINVAL ;
if ( fs_matcher - > ns_type = = MLX5_FLOW_NAMESPACE_EGRESS & &
( dest_devx | | dest_qp ) )
2018-07-23 15:25:09 +03:00
return - EINVAL ;
if ( dest_devx ) {
devx_obj = uverbs_attr_get_obj (
attrs , MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX ) ;
if ( IS_ERR ( devx_obj ) )
return PTR_ERR ( devx_obj ) ;
/* Verify that the given DEVX object is a flow
* steering destination .
*/
if ( ! mlx5_ib_devx_is_flow_dest ( devx_obj , & dest_id , & dest_type ) )
return - EINVAL ;
2018-09-06 17:27:08 +03:00
} else if ( dest_qp ) {
2018-07-23 15:25:09 +03:00
struct mlx5_ib_qp * mqp ;
qp = uverbs_attr_get_obj ( attrs ,
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP ) ;
if ( IS_ERR ( qp ) )
return PTR_ERR ( qp ) ;
if ( qp - > qp_type ! = IB_QPT_RAW_PACKET )
return - EINVAL ;
mqp = to_mqp ( qp ) ;
if ( mqp - > flags & MLX5_IB_QP_RSS )
dest_id = mqp - > rss_qp . tirn ;
else
dest_id = mqp - > raw_packet_qp . rq . tirn ;
dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR ;
2018-09-06 17:27:08 +03:00
} else {
dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT ;
2018-07-23 15:25:09 +03:00
}
if ( dev - > rep )
return - ENOTSUPP ;
cmd_in = uverbs_attr_get_alloced_ptr (
attrs , MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE ) ;
inlen = uverbs_attr_get_len ( attrs ,
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE ) ;
2018-09-06 17:27:06 +03:00
uflow_res = flow_resources_alloc ( MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS ) ;
if ( ! uflow_res )
return - ENOMEM ;
len = uverbs_attr_get_uobjs_arr ( attrs ,
MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS , & arr_flow_actions ) ;
for ( i = 0 ; i < len ; i + + ) {
struct mlx5_ib_flow_action * maction =
to_mflow_act ( arr_flow_actions [ i ] - > object ) ;
ret = parse_flow_flow_action ( maction , false , & flow_act ) ;
if ( ret )
goto err_out ;
flow_resources_add ( uflow_res , IB_FLOW_SPEC_ACTION_HANDLE ,
arr_flow_actions [ i ] - > object ) ;
}
2018-10-10 09:55:10 +03:00
ret = uverbs_copy_from ( & flow_act . flow_tag , attrs ,
MLX5_IB_ATTR_CREATE_FLOW_TAG ) ;
if ( ! ret ) {
if ( flow_act . flow_tag > = BIT ( 24 ) ) {
ret = - EINVAL ;
goto err_out ;
}
First merge window pull request
This has been a smaller cycle with many of the commits being smallish code
fixes and improvements across the drivers.
- Driver updates for bnxt_re, cxgb4, hfi1, hns, mlx5, nes, qedr, and rxe
- Memory window support in hns
- mlx5 user API 'flow mutate/steering' allows accessing the full packet
mangling and matching machinery from user space
- Support inter-working with verbs API calls in the 'devx' mlx5 user API, and
provide options to use devx with less privilege
- Modernize the use of syfs and the device interface to use attribute groups
and cdev properly for uverbs, and clean up some of the core code's device list
management
- More progress on net namespaces for RDMA devices
- Consolidate driver BAR mmapping support into core code helpers and rework
how RDMA holds poitners to mm_struct for get_user_pages cases
- First pass to use 'dev_name' instead of ib_device->name
- Device renaming for RDMA devices
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAlvR7dUACgkQOG33FX4g
mxojiw//a9GU5kq4IZ3LNAEio/3Ql/NHRF0uie5tSzJgipRJA1Ln9zW0Cm1S/ms1
VCmaSJ3l3q3GC4i3tIlsZSIIkN5qtjv/FsT/i+TZwSJYx9BDpPbzWtG6Mp4PSDj0
v3xzklFCN5HMOmEcjkNmyZw3VjHOt2Iw2mKjqvGbI9imCPLOYnw+WQaZLmMWMH6p
GL0HDbAopN5Lv8ireWd8pOhPLVbSb12cWM1crx+yHOS3q8YNWjIXGiZr/QkOPtPr
cymSXB8yuITJ7gnjbs/GxZHg6rxU0knC/Ck8hE7FqqYYHgytTklOXDE2ef1J2lFe
1VmotD+nTsCir0mZWSdcRrszEk7tzaZT7n1oWggKvWySDB6qaH0II8vWumJchQnN
pElIQn/WDgpekIqplamNqXJnKnDXZJpEVA01OHHDN4MNSc+Ad08hQy4FyFzpB6/G
jv9TnDMfGC6ma9pr1ipOXyCgCa2pHYEUCaYxUqRA0O/4ATVl7/PplqT0rqtJ6hKg
o/hmaVCawIFOUKD87/bo7Em2HBs3xNwE/c5ggbsQElLYeydrgPrZfrPfjkshv5K3
eIKDb+HPyis0is1aiF7m/bz1hSIYZp0bQhuKCdzLRjZobwCm5WDPhtuuAWb7vYVw
GSLCJWyet+bLyZxynNOt67gKm9je9lt8YTr5nilz49KeDytspK0=
=pacJ
-----END PGP SIGNATURE-----
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This has been a smaller cycle with many of the commits being smallish
code fixes and improvements across the drivers.
- Driver updates for bnxt_re, cxgb4, hfi1, hns, mlx5, nes, qedr, and
rxe
- Memory window support in hns
- mlx5 user API 'flow mutate/steering' allows accessing the full
packet mangling and matching machinery from user space
- Support inter-working with verbs API calls in the 'devx' mlx5 user
API, and provide options to use devx with less privilege
- Modernize the use of syfs and the device interface to use attribute
groups and cdev properly for uverbs, and clean up some of the core
code's device list management
- More progress on net namespaces for RDMA devices
- Consolidate driver BAR mmapping support into core code helpers and
rework how RDMA holds poitners to mm_struct for get_user_pages
cases
- First pass to use 'dev_name' instead of ib_device->name
- Device renaming for RDMA devices"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (242 commits)
IB/mlx5: Add support for extended atomic operations
RDMA/core: Fix comment for hw stats init for port == 0
RDMA/core: Refactor ib_register_device() function
RDMA/core: Fix unwinding flow in case of error to register device
ib_srp: Remove WARN_ON in srp_terminate_io()
IB/mlx5: Allow scatter to CQE without global signaled WRs
IB/mlx5: Verify that driver supports user flags
IB/mlx5: Support scatter to CQE for DC transport type
RDMA/drivers: Use core provided API for registering device attributes
RDMA/core: Allow existing drivers to set one sysfs group per device
IB/rxe: Remove unnecessary enum values
RDMA/umad: Use kernel API to allocate umad indexes
RDMA/uverbs: Use kernel API to allocate uverbs indexes
RDMA/core: Increase total number of RDMA ports across all devices
IB/mlx4: Add port and TID to MAD debug print
IB/mlx4: Enable debug print of SMPs
RDMA/core: Rename ports_parent to ports_kobj
RDMA/core: Do not expose unsupported counters
IB/mlx4: Refer to the device kobject instead of ports_parent
RDMA/nldev: Allow IB device rename through RDMA netlink
...
2018-10-26 07:38:19 -07:00
flow_act . flags | = FLOW_ACT_HAS_TAG ;
2018-10-10 09:55:10 +03:00
}
2018-09-06 17:27:05 +03:00
flow_handler = mlx5_ib_raw_fs_rule_add ( dev , fs_matcher , & flow_act ,
cmd_in , inlen ,
2018-07-23 15:25:09 +03:00
dest_id , dest_type ) ;
2018-09-06 17:27:06 +03:00
if ( IS_ERR ( flow_handler ) ) {
ret = PTR_ERR ( flow_handler ) ;
goto err_out ;
}
2018-07-23 15:25:09 +03:00
2018-09-06 17:27:06 +03:00
ib_set_flow ( uobj , & flow_handler - > ibflow , qp , & dev - > ib_dev , uflow_res ) ;
2018-07-23 15:25:09 +03:00
return 0 ;
2018-09-06 17:27:06 +03:00
err_out :
ib_uverbs_flow_resources_free ( uflow_res ) ;
return ret ;
2018-07-23 15:25:09 +03:00
}
2018-07-23 15:25:07 +03:00
static int flow_matcher_cleanup ( struct ib_uobject * uobject ,
enum rdma_remove_reason why )
{
struct mlx5_ib_flow_matcher * obj = uobject - > object ;
int ret ;
ret = ib_destroy_usecnt ( & obj - > usecnt , why , uobject ) ;
if ( ret )
return ret ;
kfree ( obj ) ;
return 0 ;
}
static int UVERBS_HANDLER ( MLX5_IB_METHOD_FLOW_MATCHER_CREATE ) (
2018-07-25 21:40:18 -06:00
struct ib_uverbs_file * file , struct uverbs_attr_bundle * attrs )
2018-07-23 15:25:07 +03:00
{
struct ib_uobject * uobj = uverbs_attr_get_uobject (
attrs , MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE ) ;
struct mlx5_ib_dev * dev = to_mdev ( uobj - > context - > device ) ;
struct mlx5_ib_flow_matcher * obj ;
2018-09-06 17:27:08 +03:00
u32 flags ;
2018-07-23 15:25:07 +03:00
int err ;
obj = kzalloc ( sizeof ( struct mlx5_ib_flow_matcher ) , GFP_KERNEL ) ;
if ( ! obj )
return - ENOMEM ;
2018-09-06 17:27:07 +03:00
obj - > ns_type = MLX5_FLOW_NAMESPACE_BYPASS ;
2018-07-23 15:25:07 +03:00
obj - > mask_len = uverbs_attr_get_len (
attrs , MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK ) ;
err = uverbs_copy_from ( & obj - > matcher_mask ,
attrs ,
MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK ) ;
if ( err )
goto end ;
obj - > flow_type = uverbs_attr_get_enum_id (
attrs , MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE ) ;
if ( obj - > flow_type = = MLX5_IB_FLOW_TYPE_NORMAL ) {
err = uverbs_copy_from ( & obj - > priority ,
attrs ,
MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE ) ;
if ( err )
goto end ;
}
err = uverbs_copy_from ( & obj - > match_criteria_enable ,
attrs ,
MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA ) ;
if ( err )
goto end ;
2018-09-06 17:27:08 +03:00
err = uverbs_get_flags32 ( & flags , attrs ,
MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS ,
IB_FLOW_ATTR_FLAGS_EGRESS ) ;
if ( err )
goto end ;
if ( flags ) {
err = mlx5_ib_ft_type_to_namespace (
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX , & obj - > ns_type ) ;
if ( err )
goto end ;
}
2018-07-23 15:25:07 +03:00
uobj - > object = obj ;
obj - > mdev = dev - > mdev ;
atomic_set ( & obj - > usecnt , 0 ) ;
return 0 ;
end :
kfree ( obj ) ;
return err ;
}
2018-08-28 14:18:51 +03:00
void mlx5_ib_destroy_flow_action_raw ( struct mlx5_ib_flow_action * maction )
{
switch ( maction - > flow_action_raw . sub_type ) {
case MLX5_IB_FLOW_ACTION_MODIFY_HEADER :
mlx5_modify_header_dealloc ( maction - > flow_action_raw . dev - > mdev ,
maction - > flow_action_raw . action_id ) ;
break ;
2018-08-28 14:18:54 +03:00
case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT :
mlx5_packet_reformat_dealloc ( maction - > flow_action_raw . dev - > mdev ,
maction - > flow_action_raw . action_id ) ;
break ;
2018-08-28 14:18:53 +03:00
case MLX5_IB_FLOW_ACTION_DECAP :
break ;
2018-08-28 14:18:51 +03:00
default :
break ;
}
}
/*
 * Allocate a modify-header object in firmware for the given table type and
 * wrap it in an ib_flow_action. Returns an ERR_PTR on failure; on success
 * the caller owns the returned action.
 */
static struct ib_flow_action *
mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
			     enum mlx5_ib_uapi_flow_table_type ft_type,
			     u8 num_actions, void *in)
{
	enum mlx5_flow_namespace_type namespace;
	struct mlx5_ib_flow_action *maction;
	int ret;

	ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
	if (ret)
		return ERR_PTR(-EINVAL);

	maction = kzalloc(sizeof(*maction), GFP_KERNEL);
	if (!maction)
		return ERR_PTR(-ENOMEM);

	ret = mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in,
				       &maction->flow_action_raw.action_id);
	if (ret) {
		kfree(maction);
		return ERR_PTR(ret);
	}
	maction->flow_action_raw.sub_type =
		MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
	maction->flow_action_raw.dev = dev;

	return &maction->ib_action;
}
static bool mlx5_ib_modify_header_supported ( struct mlx5_ib_dev * dev )
{
return MLX5_CAP_FLOWTABLE_NIC_RX ( dev - > mdev ,
max_modify_header_actions ) | |
MLX5_CAP_FLOWTABLE_NIC_TX ( dev - > mdev , max_modify_header_actions ) ;
}
static int UVERBS_HANDLER ( MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER ) (
struct ib_uverbs_file * file ,
struct uverbs_attr_bundle * attrs )
{
struct ib_uobject * uobj = uverbs_attr_get_uobject (
attrs , MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE ) ;
struct mlx5_ib_dev * mdev = to_mdev ( uobj - > context - > device ) ;
enum mlx5_ib_uapi_flow_table_type ft_type ;
struct ib_flow_action * action ;
size_t num_actions ;
void * in ;
int len ;
int ret ;
if ( ! mlx5_ib_modify_header_supported ( mdev ) )
return - EOPNOTSUPP ;
in = uverbs_attr_get_alloced_ptr ( attrs ,
MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM ) ;
len = uverbs_attr_get_len ( attrs ,
MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM ) ;
if ( len % MLX5_UN_SZ_BYTES ( set_action_in_add_action_in_auto ) )
return - EINVAL ;
ret = uverbs_get_const ( & ft_type , attrs ,
MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE ) ;
if ( ret )
return ret ;
num_actions = len / MLX5_UN_SZ_BYTES ( set_action_in_add_action_in_auto ) ,
action = mlx5_ib_create_modify_header ( mdev , ft_type , num_actions , in ) ;
if ( IS_ERR ( action ) )
return PTR_ERR ( action ) ;
2018-08-28 14:18:52 +03:00
uverbs_flow_action_fill_action ( action , uobj , uobj - > context - > device ,
IB_FLOW_ACTION_UNSPECIFIED ) ;
2018-08-28 14:18:51 +03:00
return 0 ;
}
2018-08-28 14:18:53 +03:00
static bool mlx5_ib_flow_action_packet_reformat_valid ( struct mlx5_ib_dev * ibdev ,
u8 packet_reformat_type ,
u8 ft_type )
{
switch ( packet_reformat_type ) {
2018-08-28 14:18:54 +03:00
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL :
if ( ft_type = = MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX )
return MLX5_CAP_FLOWTABLE ( ibdev - > mdev ,
encap_general_header ) ;
break ;
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
if ( ft_type = = MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX )
return MLX5_CAP_FLOWTABLE_NIC_TX ( ibdev - > mdev ,
reformat_l2_to_l3_tunnel ) ;
break ;
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
if ( ft_type = = MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX )
return MLX5_CAP_FLOWTABLE_NIC_RX ( ibdev - > mdev ,
reformat_l3_tunnel_to_l2 ) ;
break ;
2018-08-28 14:18:53 +03:00
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 :
if ( ft_type = = MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX )
return MLX5_CAP_FLOWTABLE_NIC_RX ( ibdev - > mdev , decap ) ;
break ;
default :
break ;
}
return false ;
}
2018-08-28 14:18:54 +03:00
static int mlx5_ib_dv_to_prm_packet_reforamt_type ( u8 dv_prt , u8 * prm_prt )
{
switch ( dv_prt ) {
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL :
* prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL ;
break ;
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
* prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 ;
break ;
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
* prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL ;
break ;
default :
return - EINVAL ;
}
return 0 ;
}
/*
 * Allocate a packet-reformat context in firmware and record it in maction.
 * On success the action_id, sub_type, and owning device are filled in;
 * maction is untouched on failure.
 */
static int mlx5_ib_flow_action_create_packet_reformat_ctx(
	struct mlx5_ib_dev *dev,
	struct mlx5_ib_flow_action *maction,
	u8 ft_type, u8 dv_prt,
	void *in, size_t len)
{
	enum mlx5_flow_namespace_type namespace;
	u8 prm_prt;
	int ret;

	ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
	if (ret)
		return ret;

	ret = mlx5_ib_dv_to_prm_packet_reforamt_type(dv_prt, &prm_prt);
	if (ret)
		return ret;

	ret = mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
					 in, namespace,
					 &maction->flow_action_raw.action_id);
	if (ret)
		return ret;

	maction->flow_action_raw.sub_type =
		MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
	maction->flow_action_raw.dev = dev;

	return 0;
}
2018-08-28 14:18:53 +03:00
static int UVERBS_HANDLER ( MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT ) (
struct ib_uverbs_file * file ,
struct uverbs_attr_bundle * attrs )
{
struct ib_uobject * uobj = uverbs_attr_get_uobject ( attrs ,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE ) ;
struct mlx5_ib_dev * mdev = to_mdev ( uobj - > context - > device ) ;
enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt ;
enum mlx5_ib_uapi_flow_table_type ft_type ;
struct mlx5_ib_flow_action * maction ;
int ret ;
ret = uverbs_get_const ( & ft_type , attrs ,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE ) ;
if ( ret )
return ret ;
ret = uverbs_get_const ( & dv_prt , attrs ,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE ) ;
if ( ret )
return ret ;
if ( ! mlx5_ib_flow_action_packet_reformat_valid ( mdev , dv_prt , ft_type ) )
return - EOPNOTSUPP ;
maction = kzalloc ( sizeof ( * maction ) , GFP_KERNEL ) ;
if ( ! maction )
return - ENOMEM ;
if ( dv_prt = =
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 ) {
maction - > flow_action_raw . sub_type =
MLX5_IB_FLOW_ACTION_DECAP ;
maction - > flow_action_raw . dev = mdev ;
2018-08-28 14:18:54 +03:00
} else {
void * in ;
int len ;
in = uverbs_attr_get_alloced_ptr ( attrs ,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF ) ;
if ( IS_ERR ( in ) ) {
ret = PTR_ERR ( in ) ;
goto free_maction ;
}
len = uverbs_attr_get_len ( attrs ,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF ) ;
ret = mlx5_ib_flow_action_create_packet_reformat_ctx ( mdev ,
maction , ft_type , dv_prt , in , len ) ;
if ( ret )
goto free_maction ;
2018-08-28 14:18:53 +03:00
}
uverbs_flow_action_fill_action ( & maction - > ib_action , uobj ,
uobj - > context - > device ,
IB_FLOW_ACTION_UNSPECIFIED ) ;
return 0 ;
2018-08-28 14:18:54 +03:00
free_maction :
kfree ( maction ) ;
return ret ;
2018-08-28 14:18:53 +03:00
}
2018-07-23 15:25:09 +03:00
DECLARE_UVERBS_NAMED_METHOD (
MLX5_IB_METHOD_CREATE_FLOW ,
UVERBS_ATTR_IDR ( MLX5_IB_ATTR_CREATE_FLOW_HANDLE ,
UVERBS_OBJECT_FLOW ,
UVERBS_ACCESS_NEW ,
UA_MANDATORY ) ,
UVERBS_ATTR_PTR_IN (
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE ,
UVERBS_ATTR_SIZE ( 1 , sizeof ( struct mlx5_ib_match_params ) ) ,
UA_MANDATORY ,
UA_ALLOC_AND_COPY ) ,
UVERBS_ATTR_IDR ( MLX5_IB_ATTR_CREATE_FLOW_MATCHER ,
MLX5_IB_OBJECT_FLOW_MATCHER ,
UVERBS_ACCESS_READ ,
UA_MANDATORY ) ,
UVERBS_ATTR_IDR ( MLX5_IB_ATTR_CREATE_FLOW_DEST_QP ,
UVERBS_OBJECT_QP ,
UVERBS_ACCESS_READ ) ,
UVERBS_ATTR_IDR ( MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX ,
MLX5_IB_OBJECT_DEVX_OBJ ,
2018-09-06 17:27:06 +03:00
UVERBS_ACCESS_READ ) ,
UVERBS_ATTR_IDRS_ARR ( MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS ,
UVERBS_OBJECT_FLOW_ACTION ,
UVERBS_ACCESS_READ , 1 ,
MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS ,
2018-10-10 09:55:10 +03:00
UA_OPTIONAL ) ,
UVERBS_ATTR_PTR_IN ( MLX5_IB_ATTR_CREATE_FLOW_TAG ,
UVERBS_ATTR_TYPE ( u32 ) ,
UA_OPTIONAL ) ) ;
2018-07-23 15:25:09 +03:00
/* Destroy method plus registration of both flow methods on UVERBS_OBJECT_FLOW. */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DESTROY_FLOW,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

ADD_UVERBS_METHODS(mlx5_ib_fs,
		   UVERBS_OBJECT_FLOW,
		   &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
		   &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
2018-08-28 14:18:51 +03:00
DECLARE_UVERBS_NAMED_METHOD (
MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER ,
UVERBS_ATTR_IDR ( MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE ,
UVERBS_OBJECT_FLOW_ACTION ,
UVERBS_ACCESS_NEW ,
UA_MANDATORY ) ,
UVERBS_ATTR_PTR_IN ( MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM ,
UVERBS_ATTR_MIN_SIZE ( MLX5_UN_SZ_BYTES (
set_action_in_add_action_in_auto ) ) ,
UA_MANDATORY ,
UA_ALLOC_AND_COPY ) ,
UVERBS_ATTR_CONST_IN ( MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE ,
enum mlx5_ib_uapi_flow_table_type ,
UA_MANDATORY ) ) ;
2018-08-28 14:18:53 +03:00
DECLARE_UVERBS_NAMED_METHOD (
MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT ,
UVERBS_ATTR_IDR ( MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE ,
UVERBS_OBJECT_FLOW_ACTION ,
UVERBS_ACCESS_NEW ,
UA_MANDATORY ) ,
2018-08-28 14:18:54 +03:00
UVERBS_ATTR_PTR_IN ( MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF ,
UVERBS_ATTR_MIN_SIZE ( 1 ) ,
UA_ALLOC_AND_COPY ,
UA_OPTIONAL ) ,
2018-08-28 14:18:53 +03:00
UVERBS_ATTR_CONST_IN ( MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE ,
enum mlx5_ib_uapi_flow_action_packet_reformat_type ,
UA_MANDATORY ) ,
UVERBS_ATTR_CONST_IN ( MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE ,
enum mlx5_ib_uapi_flow_table_type ,
UA_MANDATORY ) ) ;
2018-08-28 14:18:51 +03:00
ADD_UVERBS_METHODS (
mlx5_ib_flow_actions ,
UVERBS_OBJECT_FLOW_ACTION ,
2018-08-28 14:18:53 +03:00
& UVERBS_METHOD ( MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER ) ,
& UVERBS_METHOD ( MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT ) ) ;
2018-08-28 14:18:51 +03:00
2018-07-23 15:25:07 +03:00
/* Attribute schema for MLX5_IB_METHOD_FLOW_MATCHER_CREATE. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
		UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
		UA_MANDATORY),
	UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
			    mlx5_ib_flow_type,
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
			   UVERBS_ATTR_TYPE(u8),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
			     enum ib_flow_flags,
			     UA_OPTIONAL));
/* Matcher destroy method, the matcher object itself, and its object tree. */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
			    UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));

DECLARE_UVERBS_OBJECT_TREE(flow_objects,
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER));
2018-07-23 15:25:12 +03:00
int mlx5_ib_get_flow_trees ( const struct uverbs_object_tree_def * * root )
{
int i = 0 ;
root [ i + + ] = & flow_objects ;
root [ i + + ] = & mlx5_ib_fs ;
2018-08-28 14:18:51 +03:00
root [ i + + ] = & mlx5_ib_flow_actions ;
2018-07-23 15:25:12 +03:00
return i ;
}