cb8e59cc87
Pull networking updates from David Miller:

 1) Allow setting bluetooth L2CAP modes via socket option, from Luiz Augusto von Dentz.

 2) Add GSO partial support to igc, from Sasha Neftin.

 3) Several cleanups and improvements to r8169 from Heiner Kallweit.

 4) Add IF_OPER_TESTING link state and use it when ethtool triggers a device self-test. From Andrew Lunn.

 5) Start moving away from custom driver versions, use the globally defined kernel version instead, from Leon Romanovsky.

 6) Support GRO via gro_cells in the DSA layer, from Alexander Lobakin.

 7) Allow hard IRQ deferral during NAPI, from Eric Dumazet.

 8) Add SR-IOV and VF support to hinic, from Luo bin.

 9) Support Media Redundancy Protocol (MRP) in the bridging code, from Horatiu Vultur.

10) Support netmap in the nft_nat code, from Pablo Neira Ayuso.

11) Allow UDPv6 encapsulation of ESP in the ipsec code, from Sabrina Dubroca. Also add ipv6 support for espintcp.

12) Lots of ReST conversions of the networking documentation, from Mauro Carvalho Chehab.

13) Support configuration of ethtool rxnfc flows in the bcmgenet driver, from Doug Berger.

14) Allow dumping the cgroup id and filtering by it in the inet_diag code, from Dmitry Yakunin.

15) Add infrastructure to export netlink attribute policies to userspace, from Johannes Berg.

16) Several optimizations to the sch_fq scheduler, from Eric Dumazet.

17) Fall back to the default qdisc if qdisc init fails, because otherwise a packet scheduler init failure will make a device inoperative. From Jesper Dangaard Brouer.

18) Several RISC-V bpf jit optimizations, from Luke Nelson.

19) Correct the return type of the ->ndo_start_xmit() method in several drivers; it's netdev_tx_t but many drivers were using 'int'. From Yunjian Wang.

20) Add an ethtool interface for PHY master/slave config, from Oleksij Rempel.

21) Add BPF iterators, from Yonghong Song.

22) Add cable test infrastructure, including ethtool interfaces, from Andrew Lunn. The Marvell PHY driver is the first to support this facility.

23) Remove zero-length arrays all over, from Gustavo A. R. Silva.

24) Calculate and maintain an explicit frame size in XDP, from Jesper Dangaard Brouer.

25) Add CAP_BPF, from Alexei Starovoitov.

26) Support terse dumps in the packet scheduler, from Vlad Buslov.

27) Support XDP_TX bulking in the dpaa2 driver, from Ioana Ciornei.

28) Add devm_register_netdev(), from Bartosz Golaszewski.

29) Minimize qdisc resets, from Cong Wang.

30) Get rid of kernel_getsockopt and kernel_setsockopt in order to eliminate set_fs/get_fs calls. From Christoph Hellwig.
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2517 commits)
  selftests: net: ip_defrag: ignore EPERM
  net_failover: fixed rollback in net_failover_open()
  Revert "tipc: Fix potential tipc_aead refcnt leak in tipc_crypto_rcv"
  Revert "tipc: Fix potential tipc_node refcnt leak in tipc_rcv"
  vmxnet3: allow rx flow hash ops only when rss is enabled
  hinic: add set_channels ethtool_ops support
  selftests/bpf: Add a default $(CXX) value
  tools/bpf: Don't use $(COMPILE.c)
  bpf, selftests: Use bpf_probe_read_kernel
  s390/bpf: Use bcr 0,%0 as tail call nop filler
  s390/bpf: Maintain 8-byte stack alignment
  selftests/bpf: Fix verifier test
  selftests/bpf: Fix sample_cnt shared between two threads
  bpf, selftests: Adapt cls_redirect to call csum_level helper
  bpf: Add csum_level helper for fixing up csum levels
  bpf: Fix up bpf_skb_adjust_room helper's skb csum setting
  sfc: add missing annotation for efx_ef10_try_update_nic_stats_vf()
  crypto/chtls: IPv6 support for inline TLS
  Crypto/chcr: Fixes a coccinile check error
  Crypto/chcr: Fixes compilations warnings
  ...
// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

struct callchain_cpus_entries {
        struct rcu_head rcu_head;
        struct perf_callchain_entry *cpu_entries[];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

static inline size_t perf_callchain_entry__sizeof(void)
{
        return (sizeof(struct perf_callchain_entry) +
                sizeof(__u64) * (sysctl_perf_event_max_stack +
                                 sysctl_perf_event_max_contexts_per_stack));
}

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;


__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                                  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                                struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
        struct callchain_cpus_entries *entries;
        int cpu;

        entries = container_of(head, struct callchain_cpus_entries, rcu_head);

        for_each_possible_cpu(cpu)
                kfree(entries->cpu_entries[cpu]);

        kfree(entries);
}

static void release_callchain_buffers(void)
{
        struct callchain_cpus_entries *entries;

        entries = callchain_cpus_entries;
        RCU_INIT_POINTER(callchain_cpus_entries, NULL);
        call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
        int cpu;
        int size;
        struct callchain_cpus_entries *entries;

        /*
         * We can't use the percpu allocation API for data that can be
         * accessed from NMI. Use a temporary manual per cpu allocation
         * until that gets sorted out.
         */
        size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

        entries = kzalloc(size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

        for_each_possible_cpu(cpu) {
                entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
                                                         cpu_to_node(cpu));
                if (!entries->cpu_entries[cpu])
                        goto fail;
        }

        rcu_assign_pointer(callchain_cpus_entries, entries);

        return 0;

fail:
        for_each_possible_cpu(cpu)
                kfree(entries->cpu_entries[cpu]);
        kfree(entries);

        return -ENOMEM;
}
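For a sense of scale, here is a minimal sketch of the arithmetic behind these allocations, assuming the usual uapi defaults (PERF_MAX_STACK_DEPTH = 127, PERF_MAX_CONTEXTS_PER_STACK = 8, PERF_NR_CONTEXTS = 4). The helper below is purely illustrative and is not part of this file.

/*
 * Illustrative only: mirrors perf_callchain_entry__sizeof() and the
 * per-context multiplication in alloc_callchain_buffers() above, with
 * the assumed default sysctl values hard-coded.
 */
static size_t example_callchain_bytes_per_cpu(void)
{
        /* 8 * (127 + 8) = 1080 bytes of slots, plus the entry header */
        size_t entry_size = sizeof(struct perf_callchain_entry) +
                            sizeof(__u64) * (127 + 8);

        /* one entry per recursion context (task, softirq, hardirq, NMI) */
        return entry_size * 4;
}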
int get_callchain_buffers(int event_max_stack)
{
        int err = 0;
        int count;

        mutex_lock(&callchain_mutex);

        count = atomic_inc_return(&nr_callchain_events);
        if (WARN_ON_ONCE(count < 1)) {
                err = -EINVAL;
                goto exit;
        }

        /*
         * If requesting per event more than the global cap,
         * return a different error to help userspace figure
         * this out.
         *
         * And also do it here so that we have &callchain_mutex held.
         */
        if (event_max_stack > sysctl_perf_event_max_stack) {
                err = -EOVERFLOW;
                goto exit;
        }

        if (count == 1)
                err = alloc_callchain_buffers();
exit:
        if (err)
                atomic_dec(&nr_callchain_events);

        mutex_unlock(&callchain_mutex);

        return err;
}

void put_callchain_buffers(void)
{
        if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
                release_callchain_buffers();
                mutex_unlock(&callchain_mutex);
        }
}
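A minimal sketch of how a client is expected to pair these two helpers, assuming a consumer that needs callchain sampling; the example_* function names are hypothetical and not taken from core.c.

/*
 * Hypothetical caller: take a callchain-buffer reference while the
 * consumer lives, drop it on teardown. On the 0->1 transition
 * get_callchain_buffers() allocates the per-CPU buffers; on the 1->0
 * transition put_callchain_buffers() frees them via RCU.
 */
static int example_consumer_init(int sample_max_stack)
{
        int err;

        err = get_callchain_buffers(sample_max_stack);
        if (err)        /* -EOVERFLOW if the request exceeds the sysctl cap */
                return err;

        /* ... the consumer may now call get_perf_callchain() when sampling ... */
        return 0;
}

static void example_consumer_destroy(void)
{
        put_callchain_buffers();
}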
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
        int cpu;
        struct callchain_cpus_entries *entries;

        *rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
        if (*rctx == -1)
                return NULL;

        entries = rcu_dereference(callchain_cpus_entries);
        if (!entries)
                return NULL;

        cpu = smp_processor_id();

        return (((void *)entries->cpu_entries[cpu]) +
                (*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
        put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark)
{
        struct perf_callchain_entry *entry;
        struct perf_callchain_entry_ctx ctx;
        int rctx;

        entry = get_callchain_entry(&rctx);
        if (rctx == -1)
                return NULL;

        if (!entry)
                goto exit_put;

        ctx.entry = entry;
        ctx.max_stack = max_stack;
        ctx.nr = entry->nr = init_nr;
        ctx.contexts = 0;
        ctx.contexts_maxed = false;

        if (kernel && !user_mode(regs)) {
                if (add_mark)
                        perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
                perf_callchain_kernel(&ctx, regs);
        }

        if (user) {
                if (!user_mode(regs)) {
                        if (current->mm)
                                regs = task_pt_regs(current);
                        else
                                regs = NULL;
                }

                if (regs) {
                        mm_segment_t fs;

                        if (crosstask)
                                goto exit_put;

                        if (add_mark)
                                perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

                        fs = get_fs();
                        set_fs(USER_DS);
                        perf_callchain_user(&ctx, regs);
                        set_fs(fs);
                }
        }

exit_put:
        put_callchain_entry(rctx);

        return entry;
}
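A minimal sketch of how a sampling path might drive get_perf_callchain(); the wrapper name and the particular choice of flags are assumptions for illustration, not the actual core.c caller.

/*
 * Hypothetical sampling-path wrapper. init_nr = 0 starts with an empty
 * chain, crosstask = false because the current task is unwound, and
 * add_mark = true asks for PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER
 * separator entries between the two halves of the chain.
 */
static struct perf_callchain_entry *
example_sample_callchain(struct pt_regs *regs, u32 max_stack)
{
        bool kernel = true;     /* capture the kernel-side frames */
        bool user = true;       /* and the user-side frames */

        return get_perf_callchain(regs, 0, kernel, user,
                                  max_stack, false, true);
}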
/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
                                 void *buffer, size_t *lenp, loff_t *ppos)
{
        int *value = table->data;
        int new_value = *value, ret;
        struct ctl_table new_table = *table;

        new_table.data = &new_value;
        ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
        if (ret || !write)
                return ret;

        mutex_lock(&callchain_mutex);
        if (atomic_read(&nr_callchain_events))
                ret = -EBUSY;
        else
                *value = new_value;

        mutex_unlock(&callchain_mutex);

        return ret;
}
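For context, a sketch of how a handler like this is typically wired into a ctl_table entry; the table name and the min/max limit values below are assumptions, not copied from kernel/sysctl.c.

/*
 * Illustrative ctl_table entry (hypothetical table and limit names).
 * The handler above only commits the new value while no callchain
 * events exist; otherwise the write fails with -EBUSY.
 */
static int example_zero;                        /* assumed lower bound */
static int example_max_stack_limit = 1024;      /* assumed upper bound */

static struct ctl_table example_perf_table[] = {
        {
                .procname       = "perf_event_max_stack",
                .data           = &sysctl_perf_event_max_stack,
                .maxlen         = sizeof(sysctl_perf_event_max_stack),
                .mode           = 0644,
                .proc_handler   = perf_event_max_stack_handler,
                .extra1         = &example_zero,
                .extra2         = &example_max_stack_limit,
        },
        { }
};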