// SPDX-License-Identifier: GPL-2.0

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>
#include <net/net_debug.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/sock.h>

# include "page_pool_priv.h"
# include "netdev-genl-gen.h"
static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
 * Ordering: inside rtnl_lock
 */
static DEFINE_MUTEX(page_pools_lock);

/* Page pools are only reachable from user space (via netlink) if they are
 * linked to a netdev at creation time. Following page pool "visibility"
 * states are possible:
 *  - normal
 *    - user.list: linked to real netdev, netdev: real netdev
 *  - orphaned - real netdev has disappeared
 *    - user.list: linked to lo, netdev: lo
 *  - invisible - either (a) created without netdev linking, (b) unlisted due
 *    to error, or (c) the entire namespace which owned this pool disappeared
 *    - user.list: unhashed, netdev: unknown
 */

typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
			     const struct genl_info *info);

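/* Handle a single-pool GET: look the pool up by ID, make sure it is
 * visible and belongs to the requesting netns, then build and send a
 * reply with the caller-provided fill callback.
 */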
static int
netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill)
{
	struct page_pool *pool;
	struct sk_buff *rsp;
	int err;

	mutex_lock(&page_pools_lock);
	pool = xa_load(&page_pools, id);
	if (!pool || hlist_unhashed(&pool->user.list) ||
	    !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
		err = -ENOENT;
		goto err_unlock;
	}

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = fill(rsp, pool, info);
	if (err)
		goto err_free_msg;

	mutex_unlock(&page_pools_lock);

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}

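/* Dump position kept in cb->ctx across dump continuations: the ifindex
 * being walked and the ID of the last pool already dumped for it.
 */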
struct page_pool_dump_cb {
	unsigned long ifindex;
	u32 pp_id;
};

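/* Walk every netdev in the netns and every page pool listed on it,
 * emitting one message per pool. When the skb fills up (-EMSGSIZE) the
 * position is preserved in the dump state, so the next continuation
 * resumes where this pass stopped.
 */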
static int
netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
			     pp_nl_fill_cb fill)
{
	struct page_pool_dump_cb *state = (void *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct page_pool *pool;
	int err = 0;

	rtnl_lock();
	mutex_lock(&page_pools_lock);
	for_each_netdev_dump(net, netdev, state->ifindex) {
		hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
			if (state->pp_id && state->pp_id < pool->user.id)
				continue;

			state->pp_id = pool->user.id;
			err = fill(skb, pool, info);
			if (err)
				break;
		}

		state->pp_id = 0;
	}
	mutex_unlock(&page_pools_lock);
	rtnl_unlock();

	if (skb->len && err == -EMSGSIZE)
		return skb->len;
	return err;
}

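/* Fill one page-pool-stats message: a nested "info" attribute carrying
 * the pool ID (and ifindex, unless the pool was orphaned to loopback),
 * followed by the flat allocation and recycling counters.
 */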
static int
page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
			const struct genl_info *info)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	struct nlattr *nest;
	void *hdr;

	if (!page_pool_get_stats(pool, &stats))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO);
	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
	    (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	     nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			 pool->slow.netdev->ifindex)))
		goto err_cancel_nest;
	nla_nest_end(rsp, nest);

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
			 stats.alloc_stats.fast) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
			 stats.alloc_stats.slow) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
			 stats.alloc_stats.slow_high_order) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
			 stats.alloc_stats.empty) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
			 stats.alloc_stats.refill) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
			 stats.alloc_stats.waive) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
			 stats.recycle_stats.cached) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
			 stats.recycle_stats.cache_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
			 stats.recycle_stats.ring) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
			 stats.recycle_stats.ring_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
			 stats.recycle_stats.released_refcnt))
		goto err_cancel_msg;

	genlmsg_end(rsp, hdr);

	return 0;
err_cancel_nest:
	nla_nest_cancel(rsp, nest);
err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
#else
	GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS");
	return -EOPNOTSUPP;
#endif
}

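/* Stats GET selects the pool via the nested page-pool-info attribute;
 * only selection by ID is supported, ifindex is explicitly rejected.
 * Example query via the YNL CLI:
 *   $ ./cli.py --no-schema --spec netlink/specs/netdev.yaml \
 *         --dump page-pool-stats-get
 */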
int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)];
	struct nlattr *nest;
	int err;
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO))
		return -EINVAL;

	nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO];
	err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
			       netdev_page_pool_info_nl_policy,
			       info->extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;
	if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NETDEV_A_PAGE_POOL_IFINDEX],
				    "selecting by ifindex not supported");
		return -EINVAL;
	}

	id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill);
}

int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
					 struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
}

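/* Fill one page-pool message: the ID, then the optional attributes -
 * ifindex (unless orphaned to loopback), NAPI ID, in-flight page and
 * memory counts, and the detach time once the pool has been detached.
 */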
static int
page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
		  const struct genl_info *info)
{
	size_t inflight, refsz;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
		goto err_cancel;

	if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			pool->slow.netdev->ifindex))
		goto err_cancel;
	if (pool->user.napi_id &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
		goto err_cancel;
	inflight = page_pool_inflight(pool, false);
	refsz = PAGE_SIZE << pool->p.order;
	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
			 inflight * refsz))
		goto err_cancel;

	if (pool->user.detach_time &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
			 pool->user.detach_time))
		goto err_cancel;
2023-11-27 02:07:36 +03:00
net: page_pool: implement GET in the netlink API
Expose the very basic page pool information via netlink.
Example using ynl-py for a system with 9 queues:
$ ./cli.py --no-schema --spec netlink/specs/netdev.yaml \
--dump page-pool-get
[{'id': 19, 'ifindex': 2, 'napi-id': 147},
{'id': 18, 'ifindex': 2, 'napi-id': 146},
{'id': 17, 'ifindex': 2, 'napi-id': 145},
{'id': 16, 'ifindex': 2, 'napi-id': 144},
{'id': 15, 'ifindex': 2, 'napi-id': 143},
{'id': 14, 'ifindex': 2, 'napi-id': 142},
{'id': 13, 'ifindex': 2, 'napi-id': 141},
{'id': 12, 'ifindex': 2, 'napi-id': 140},
{'id': 11, 'ifindex': 2, 'napi-id': 139},
{'id': 10, 'ifindex': 2, 'napi-id': 138}]
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
2023-11-27 02:07:34 +03:00
	genlmsg_end(rsp, hdr);

	return 0;
err_cancel:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

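/* Multicast a notification about a pool being added, changed or
 * deleted. Skipped for 'invisible' pools and when nobody subscribes
 * to the page-pool notification group.
 */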
static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;
	struct net *net;

	lockdep_assert_held(&page_pools_lock);

	/* 'invisible' page pools don't matter */
	if (hlist_unhashed(&pool->user.list))
		return;
	net = dev_net(pool->slow.netdev);

	if (!genl_has_listeners(&netdev_nl_family, net, NETDEV_NLGRP_PAGE_POOL))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (page_pool_nl_fill(ntf, pool, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, net, ntf,
				0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);
}

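/* Plain GET handlers, reusing the generic do/dump helpers with the
 * basic fill callback. Example dump via the YNL CLI:
 *   $ ./cli.py --no-schema --spec netlink/specs/netdev.yaml \
 *         --dump page-pool-get
 *   [{'id': 19, 'ifindex': 2, 'napi-id': 147}, ...]
 */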
int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;

	id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill);
}

int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
}

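/* Register a newly created pool: allocate a cyclic XArray ID and, if
 * the pool is linked to a netdev, hash it onto that netdev's list,
 * record the NAPI ID and notify listeners.
 */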
int page_pool_list(struct page_pool *pool)
{
	static u32 id_alloc_next;
	int err;

	mutex_lock(&page_pools_lock);
	err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b,
			      &id_alloc_next, GFP_KERNEL);
	if (err < 0)
		goto err_unlock;

	INIT_HLIST_NODE(&pool->user.list);
	if (pool->slow.netdev) {
		hlist_add_head(&pool->user.list,
			       &pool->slow.netdev->page_pools);
		pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;

		netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
	}

	mutex_unlock(&page_pools_lock);
	return 0;

err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}

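/* Record the moment the pool was detached from its driver (in boottime
 * seconds, reported as detach-time) and notify listeners.
 */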
void page_pool_detached(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	pool->user.detach_time = ktime_get_boottime_seconds();
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
	mutex_unlock(&page_pools_lock);
}

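/* Final unlisting when a pool is destroyed: send the DEL notification,
 * drop the XArray entry and unhash it from the netdev list if linked.
 */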
void page_pool_unlist(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
	xa_erase(&page_pools, pool->user.id);
	if (!hlist_unhashed(&pool->user.list))
		hlist_del(&pool->user.list);
	mutex_unlock(&page_pools_lock);
}

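/* Loopback is unregistering, i.e. the netns is being dismantled:
 * unhash all remaining pools and poison their netdev pointers,
 * making the pools invisible.
 */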
static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
{
	struct page_pool *pool;
	struct hlist_node *n;

	mutex_lock(&page_pools_lock);
	hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
		hlist_del_init(&pool->user.list);
		pool->slow.netdev = NET_PTR_POISON;
	}
	mutex_unlock(&page_pools_lock);
}

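/* A real netdev is unregistering: orphan its remaining pools by
 * re-pointing them at the netns loopback device, splicing them onto
 * its list, and notifying about each change.
 */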
static void page_pool_unreg_netdev(struct net_device *netdev)
{
	struct page_pool *pool, *last;
	struct net_device *lo;

	lo = dev_net(netdev)->loopback_dev;

	mutex_lock(&page_pools_lock);
	last = NULL;
	hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
		pool->slow.netdev = lo;
		netdev_nl_page_pool_event(pool,
					  NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
		last = pool;
	}
	if (last)
		hlist_splice_init(&netdev->page_pools, &last->user.list,
				  &lo->page_pools);
	mutex_unlock(&page_pools_lock);
}

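/* On NETDEV_UNREGISTER, orphan the device's pools onto loopback, or
 * wipe them entirely when loopback itself (the netns) is going away.
 */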
static int
page_pool_netdevice_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	if (hlist_empty(&netdev->page_pools))
		return NOTIFY_OK;

	if (netdev->ifindex != LOOPBACK_IFINDEX)
		page_pool_unreg_netdev(netdev);
	else
		page_pool_unreg_netdev_wipe(netdev);
	return NOTIFY_OK;
}

static struct notifier_block page_pool_netdevice_nb = {
	.notifier_call = page_pool_netdevice_event,
};

static int __init page_pool_user_init(void)
{
	return register_netdevice_notifier(&page_pool_netdevice_nb);
}

subsys_initcall(page_pool_user_init);