// SPDX-License-Identifier: GPL-2.0-only
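/* Ring parameter handling for the ethtool netlink interface:
 * ETHTOOL_MSG_RINGS_GET reports the device's ring limits and current
 * sizes, ETHTOOL_MSG_RINGS_SET changes them.  Userspace typically
 * reaches these handlers through the ethtool utility, e.g.
 * "ethtool -g <dev>" and "ethtool -G <dev> rx 1024 tx 1024".
 */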
#include "netlink.h"
#include "common.h"
struct rings_req_info {
	struct ethnl_req_info		base;
};

struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
	struct kernel_ethtool_ringparam	kernel_ringparam;
	u32				supported_ring_params;
};
#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)
const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]	=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
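/* RINGS_GET: snapshot the driver's ring limits and current sizes into the
 * reply data under ethnl_ops_begin()/ethnl_ops_complete().
 */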
static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      const struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;

	data->supported_ring_params = dev->ethtool_ops->supported_ring_params;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
					&data->kernel_ringparam, info->extack);
	ethnl_ops_complete(dev);

	return 0;
}
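/* Worst-case netlink payload of a RINGS_GET reply; every attribute that
 * rings_fill_reply() may emit must be accounted for here.
 */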
static int rings_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TX_PUSH */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_RX_PUSH */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN */
	       nla_total_size(sizeof(u32));	/* _RINGS_TX_PUSH_BUF_LEN_MAX */
}
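/* Fill a RINGS_GET reply.  Most attributes are emitted only when the
 * corresponding limit or value is non-zero; TX_PUSH/RX_PUSH are always
 * reported, and the TX push buffer attributes only when the driver sets
 * ETHTOOL_RING_USE_TX_PUSH_BUF_LEN.
 */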
static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
	const struct ethtool_ringparam *ringparam = &data->ringparam;
	u32 supported_ring_params = data->supported_ring_params;

	WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))) ||
	    (kr->rx_buf_len &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
	    (kr->tcp_data_split &&
	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			 kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX,
			  kr->tx_push_buf_max_len) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN,
			  kr->tx_push_buf_len))))
		return -EMSGSIZE;

	return 0;
}
/* RINGS_SET */
const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TCP_DATA_SPLIT]	=
		NLA_POLICY_MAX(NLA_U8, ETHTOOL_TCP_DATA_SPLIT_ENABLED),
	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_RX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN]	= { .type = NLA_U32 },
};
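/* Reject attributes the driver does not advertise in supported_ring_params
 * before any device state is touched; returns 1 to let the core continue
 * with ethnl_set_rings(), or a negative errno.
 */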
static int
ethnl_set_rings_validate(struct ethnl_req_info *req_info,
			 struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
	struct nlattr **tb = info->attrs;

	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TCP_DATA_SPLIT)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "setting TCP data split is not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_PUSH],
				    "setting rx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
				    "setting tx push buf len is not supported");
		return -EOPNOTSUPP;
	}

	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}
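/* RINGS_SET: read the current parameters, apply the requested changes,
 * range-check them against the reported maxima and hand the result to the
 * driver's set_ringparam().  Returns 1 if the device was changed (which
 * triggers an ETHTOOL_MSG_RINGS_NTF notification), 0 if nothing changed,
 * or a negative errno.
 */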
static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct kernel_ethtool_ringparam kernel_ringparam = {};
	struct ethtool_ringparam ringparam = {};
	struct net_device *dev = req_info->dev;
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	bool mod = false;
	int ret;

	dev->ethtool_ops->get_ringparam(dev, &ringparam,
					&kernel_ringparam, info->extack);

	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u8(&kernel_ringparam.tcp_data_split,
			tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ethnl_update_u8(&kernel_ringparam.tx_push,
			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
	ethnl_update_u8(&kernel_ringparam.rx_push,
			tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
	ethnl_update_u32(&kernel_ringparam.tx_push_buf_len,
			 tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod);
	if (!mod)
		return 0;

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else
		err_attr = NULL;
	if (err_attr) {
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		return -EINVAL;
	}

	if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) {
		NL_SET_ERR_MSG_ATTR_FMT(info->extack, tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
					"Requested TX push buffer exceeds the maximum of %u",
					kernel_ringparam.tx_push_buf_max_len);
		return -EINVAL;
	}

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
					      &kernel_ringparam, info->extack);
	return ret < 0 ? ret : 1;
}
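/* Glue between the generic ethtool netlink request handling and the
 * RINGS handlers above.
 */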
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),
	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,

	.set_validate		= ethnl_set_rings_validate,
	.set			= ethnl_set_rings,
	.set_ntf_cmd		= ETHTOOL_MSG_RINGS_NTF,
};