/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
*/

#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <rdma/ib_verbs.h>

#include "netlink_gen.h"

struct devlink_rel;

#define DEVLINK_REGISTERED XA_MARK_1

#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
	(__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)

struct devlink_dev_stats {
	u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
	u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
};

struct devlink {
	u32 index;
	struct xarray ports;
	struct list_head rate_list;
	struct list_head sb_list;
	struct list_head dpipe_table_list;
	struct list_head resource_list;
	struct xarray params;
	struct list_head region_list;
	struct list_head reporter_list;
	struct devlink_dpipe_headers *dpipe_headers;
	struct list_head trap_list;
	struct list_head trap_group_list;
	struct list_head trap_policer_list;
	struct list_head linecard_list;
	const struct devlink_ops *ops;
	struct xarray snapshot_ids;
	struct devlink_dev_stats stats;
	struct device *dev;
	possible_net_t _net;
	/* Serializes access to devlink instance specific objects such as
	 * port, sb, dpipe, resource, params, region, traps and more.
	 */
	struct mutex lock;
	struct lock_class_key lock_key;
	u8 reload_failed:1;
	refcount_t refcount;
	struct rcu_work rwork;
	struct devlink_rel *rel;
	struct xarray nested_rels;
	char priv[] __aligned(NETDEV_ALIGN);
};

extern struct xarray devlinks;
extern struct genl_family devlink_nl_family;

/* devlink instances are open to access from user space after the
 * devlink_register() call. Such a logical barrier allows us to have certain
 * expectations related to locking.
 *
 * Before *_register() - we are in the initialization stage and no parallel
 * access to the devlink instance is possible. All drivers perform that phase
 * while implicitly holding device_lock.
 *
 * After *_register() - users and the driver can access the devlink instance
 * at the same time. A sketch of the typical driver flow follows the assert
 * macros below.
 */
#define ASSERT_DEVLINK_REGISTERED(d)					\
	WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
#define ASSERT_DEVLINK_NOT_REGISTERED(d)				\
	WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
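
/* Illustrative sketch (not taken from any specific driver) of the flow the
 * comment above describes. The names my_pci_probe(), my_devlink_ops and
 * struct my_priv are hypothetical.
 *
 *	static int my_pci_probe(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		struct devlink *devlink;
 *
 *		devlink = devlink_alloc(&my_devlink_ops,
 *					sizeof(struct my_priv), &pdev->dev);
 *		if (!devlink)
 *			return -ENOMEM;
 *
 *		// Initialization stage: the instance is not yet visible to
 *		// user space and the driver core holds device_lock around
 *		// probe, so no parallel access is possible.
 *		devl_lock(devlink);
 *		// ... devl_port_register(), devl_resource_register(), ...
 *		devl_unlock(devlink);
 *
 *		devlink_register(devlink);
 *		// From here on, user space may access the instance
 *		// concurrently with the driver.
 *		return 0;
 *	}
 */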

/* Iterate over devlink pointers to which it was possible to get a reference.
 * devlink_put() needs to be called for each iterated devlink pointer in the
 * loop body in order to release the reference. See the usage sketch below.
 */
#define devlinks_xa_for_each_registered_get(net, index, devlink)	\
	for (index = 0; (devlink = devlinks_xa_find_get(net, &index)); index++)

struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp);
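
/* Minimal usage sketch of the iterator above; the surrounding function is
 * hypothetical. Each iteration holds a reference that the loop body must
 * release with devlink_put().
 *
 *	static void my_walk_instances(struct net *net)
 *	{
 *		struct devlink *devlink;
 *		unsigned long index;
 *
 *		devlinks_xa_for_each_registered_get(net, index, devlink) {
 *			// ... use devlink ...
 *			devlink_put(devlink);	// drop the iterator's reference
 *		}
 *	}
 */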

static inline bool devl_is_registered(struct devlink *devlink)
{
	devl_assert_locked(devlink);
	return xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
}

typedef void devlink_rel_notify_cb_t(struct devlink *devlink, u32 obj_index);
typedef void devlink_rel_cleanup_cb_t(struct devlink *devlink, u32 obj_index,
				      u32 rel_index);

void devlink_rel_nested_in_clear(u32 rel_index);
int devlink_rel_nested_in_add(u32 *rel_index, u32 devlink_index,
			      u32 obj_index, devlink_rel_notify_cb_t *notify_cb,
			      devlink_rel_cleanup_cb_t *cleanup_cb,
			      struct devlink *devlink);
void devlink_rel_nested_in_notify(struct devlink *devlink);
int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink,
				   u32 rel_index, int attrtype,
				   bool *msg_updated);
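
/* Illustrative sketch of the nested-instance relationship helpers declared
 * above; the callback names are hypothetical. The general shape, as implied
 * by the signatures: a nested devlink instance is linked under an object
 * (devlink_index + obj_index) of another instance, rel_index identifies the
 * created relationship, and the callbacks let the core notify about or clean
 * up the related object later.
 *
 *	static void my_obj_rel_notify(struct devlink *devlink, u32 obj_index)
 *	{
 *		// e.g. re-emit a netlink notification for object obj_index
 *	}
 *
 *	static void my_obj_rel_cleanup(struct devlink *devlink, u32 obj_index,
 *				       u32 rel_index)
 *	{
 *		// e.g. drop the rel_index stored for object obj_index
 *	}
 *
 *	err = devlink_rel_nested_in_add(&rel_index, devlink->index, obj_index,
 *					my_obj_rel_notify, my_obj_rel_cleanup,
 *					nested_devlink);
 */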

/* Netlink */
#define DEVLINK_NL_FLAG_NEED_PORT		BIT(0)
#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT	BIT(1)

enum devlink_multicast_groups {
	DEVLINK_MCGRP_CONFIG,
};

/* state held across netlink dumps */
struct devlink_nl_dump_state {
	unsigned long instance;
	int idx;
	union {
		/* DEVLINK_CMD_REGION_READ */
		struct {
			u64 start_offset;
		};
		/* DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET */
		struct {
			u64 dump_ts;
		};
	};
};

typedef int devlink_nl_dump_one_func_t(struct sk_buff *msg,
				       struct devlink *devlink,
				       struct netlink_callback *cb,
				       int flags);

struct devlink *
devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs);

int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
		      devlink_nl_dump_one_func_t *dump_one);

static inline struct devlink_nl_dump_state *
devlink_dump_state(struct netlink_callback *cb)
{
	NL_ASSERT_DUMP_CTX_FITS(struct devlink_nl_dump_state);

	return (struct devlink_nl_dump_state *)cb->ctx;
}
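
/* Illustrative sketch of how the dump helpers above fit together; the
 * object-specific names my_obj_get_dump_one() and my_obj_get_dumpit() are
 * hypothetical. devlink_nl_dumpit() walks the registered instances and calls
 * the dump_one callback per instance, while devlink_dump_state(cb) gives the
 * callback a place to remember where to resume (e.g. ->idx).
 *
 *	static int my_obj_get_dump_one(struct sk_buff *msg,
 *				       struct devlink *devlink,
 *				       struct netlink_callback *cb, int flags)
 *	{
 *		struct devlink_nl_dump_state *state = devlink_dump_state(cb);
 *
 *		// fill msg starting from state->idx; update state->idx and
 *		// return -EMSGSIZE when the skb runs out of room so the dump
 *		// can resume here on the next callback invocation
 *		return 0;
 *	}
 *
 *	static int my_obj_get_dumpit(struct sk_buff *msg,
 *				     struct netlink_callback *cb)
 *	{
 *		return devlink_nl_dumpit(msg, cb, my_obj_get_dump_one);
 *	}
 */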

static inline int
devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
{
	if (nla_put_string(msg, DEVLINK_ATTR_BUS_NAME, devlink->dev->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, DEVLINK_ATTR_DEV_NAME, dev_name(devlink->dev)))
		return -EMSGSIZE;
	return 0;
}

int devlink_nl_put_nested_handle(struct sk_buff *msg, struct net *net,
				 struct devlink *devlink, int attrtype);
int devlink_nl_msg_reply_and_new(struct sk_buff **msg, struct genl_info *info);

/* Notify */
void devlink_notify_register(struct devlink *devlink);
void devlink_notify_unregister(struct devlink *devlink);
void devlink_ports_notify_register(struct devlink *devlink);
void devlink_ports_notify_unregister(struct devlink *devlink);
void devlink_params_notify_register(struct devlink *devlink);
void devlink_params_notify_unregister(struct devlink *devlink);
void devlink_regions_notify_register(struct devlink *devlink);
void devlink_regions_notify_unregister(struct devlink *devlink);
void devlink_trap_policers_notify_register(struct devlink *devlink);
void devlink_trap_policers_notify_unregister(struct devlink *devlink);
void devlink_trap_groups_notify_register(struct devlink *devlink);
void devlink_trap_groups_notify_unregister(struct devlink *devlink);
void devlink_traps_notify_register(struct devlink *devlink);
void devlink_traps_notify_unregister(struct devlink *devlink);
void devlink_rates_notify_register(struct devlink *devlink);
void devlink_rates_notify_unregister(struct devlink *devlink);
void devlink_linecards_notify_register(struct devlink *devlink);
void devlink_linecards_notify_unregister(struct devlink *devlink);

/* Ports */
#define ASSERT_DEVLINK_PORT_INITIALIZED(devlink_port)		\
	WARN_ON_ONCE(!(devlink_port)->initialized)

struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
					       unsigned int port_index);
int devlink_port_netdevice_event(struct notifier_block *nb,
				 unsigned long event, void *ptr);
struct devlink_port *
devlink_port_get_from_info(struct devlink *devlink, struct genl_info *info);
struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
						 struct nlattr **attrs);

/* Reload */
bool devlink_reload_actions_valid(const struct devlink_ops *ops);
int devlink_reload(struct devlink *devlink, struct net *dest_net,
		   enum devlink_reload_action action,
		   enum devlink_reload_limit limit,
		   u32 *actions_performed, struct netlink_ext_ack *extack);

static inline bool devlink_reload_supported(const struct devlink_ops *ops)
{
	return ops->reload_down && ops->reload_up;
}

/* Params */
void devlink_params_driverinit_load_new(struct devlink *devlink);

/* Resources */
struct devlink_resource;
int devlink_resources_validate(struct devlink *devlink,
			       struct devlink_resource *resource,
			       struct genl_info *info);

/* Rates */
int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
			     struct netlink_ext_ack *extack);

/* Linecards */
unsigned int devlink_linecard_index(struct devlink_linecard *linecard);