bpf: Add support for non-fix-size percpu mem allocation
This is needed for later percpu mem allocation when the allocation is
done by bpf program. For such cases, a global bpf_global_percpu_ma is
added where a flexible allocation size is needed.

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230827152734.1995725-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
3903802bb9
commit
41a5db8d81
@@ -55,8 +55,8 @@ struct cgroup;
 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
 extern struct kobject *btf_kobj;
-extern struct bpf_mem_alloc bpf_global_ma;
-extern bool bpf_global_ma_set;
+extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
+extern bool bpf_global_ma_set, bpf_global_percpu_ma_set;
 
 typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
 typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
@@ -64,8 +64,8 @@
 #define OFF	insn->off
 #define IMM	insn->imm
 
-struct bpf_mem_alloc bpf_global_ma;
-bool bpf_global_ma_set;
+struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
+bool bpf_global_ma_set, bpf_global_percpu_ma_set;
 
 /* No hurry in this branch
  *
@@ -2921,7 +2921,9 @@ static int __init bpf_global_ma_init(void)
 
 	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
 	bpf_global_ma_set = !ret;
-	return ret;
+	ret = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true);
+	bpf_global_percpu_ma_set = !ret;
+	return !bpf_global_ma_set || !bpf_global_percpu_ma_set;
 }
 late_initcall(bpf_global_ma_init);
 #endif
@@ -499,15 +499,16 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 	struct obj_cgroup *objcg = NULL;
 	int cpu, i, unit_size, percpu_size = 0;
 
+	/* room for llist_node and per-cpu pointer */
+	if (percpu)
+		percpu_size = LLIST_NODE_SZ + sizeof(void *);
+
 	if (size) {
 		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
 		if (!pc)
 			return -ENOMEM;
 
-		if (percpu)
-			/* room for llist_node and per-cpu pointer */
-			percpu_size = LLIST_NODE_SZ + sizeof(void *);
-		else
+		if (!percpu)
 			size += LLIST_NODE_SZ; /* room for llist_node */
 		unit_size = size;
 
@@ -527,10 +528,6 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 		return 0;
 	}
 
-	/* size == 0 && percpu is an invalid combination */
-	if (WARN_ON_ONCE(percpu))
-		return -EINVAL;
-
 	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
 	if (!pcc)
 		return -ENOMEM;
@@ -543,6 +540,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c = &cc->cache[i];
 			c->unit_size = sizes[i];
 			c->objcg = objcg;
+			c->percpu_size = percpu_size;
 			c->tgt = c;
 			prefill_mem_cache(c, cpu);
 		}
Loading…
Reference in New Issue
Block a user