Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor overlapping changes in the btusb and ixgbe drivers.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -1772,16 +1772,21 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
        bitmap_from_u64(mask, stack_mask);
        for_each_set_bit(i, mask, 64) {
            if (i >= func->allocated_stack / BPF_REG_SIZE) {
                /* This can happen if backtracking
                 * is propagating stack precision where
                 * caller has larger stack frame
                 * than callee, but backtrack_insn() should
                 * have returned -ENOTSUPP.
                /* the sequence of instructions:
                 * 2: (bf) r3 = r10
                 * 3: (7b) *(u64 *)(r3 -8) = r0
                 * 4: (79) r4 = *(u64 *)(r10 -8)
                 * doesn't contain jmps. It's backtracked
                 * as a single block.
                 * During backtracking insn 3 is not recognized as
                 * stack access, so at the end of backtracking
                 * stack slot fp-8 is still marked in stack_mask.
                 * However the parent state may not have accessed
                 * fp-8 and it's "unallocated" stack space.
                 * In such case fallback to conservative.
                 */
                verbose(env, "BUG spi %d stack_size %d\n",
                    i, func->allocated_stack);
                WARN_ONCE(1, "verifier backtracking bug");
                return -EFAULT;
                mark_all_scalars_precise(env, st);
                return 0;
            }

            if (func->stack[i].slot_type[0] != STACK_SPILL) {
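The replacement comment above explains the new behaviour: when the mask points at a stack slot outside the callee's allocated stack, the verifier now gives up on precise tracking (mark_all_scalars_precise()) instead of returning -EFAULT. A rough user-space sketch of that mask walk and fallback, with made-up helper and constant names rather than the kernel's bitmap API:

#include <stdint.h>
#include <stdio.h>

#define REG_SIZE 8   /* hypothetical stand-in for BPF_REG_SIZE */

/* Walk a 64-bit stack mask; a slot beyond the allocated stack triggers
 * a conservative fallback instead of a hard error. */
static int walk_stack_mask(uint64_t stack_mask, int allocated_stack)
{
    for (int i = 0; i < 64; i++) {
        if (!(stack_mask & (1ULL << i)))
            continue;
        if (i >= allocated_stack / REG_SIZE) {
            /* fallback: stop trying to track this state precisely */
            printf("slot %d outside %d bytes, falling back\n",
                   i, allocated_stack);
            return 0;
        }
        printf("slot %d needs precision\n", i);
    }
    return 0;
}

int main(void)
{
    return walk_stack_mask(1ULL << 5, 32); /* slot 5 >= 32/8, so fall back */
}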
@@ -5255,8 +5255,16 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
     * if the parent has to be frozen, the child has too.
     */
    cgrp->freezer.e_freeze = parent->freezer.e_freeze;
    if (cgrp->freezer.e_freeze)
    if (cgrp->freezer.e_freeze) {
        /*
         * Set the CGRP_FREEZE flag, so when a process will be
         * attached to the child cgroup, it will become frozen.
         * At this point the new cgroup is unpopulated, so we can
         * consider it frozen immediately.
         */
        set_bit(CGRP_FREEZE, &cgrp->flags);
        set_bit(CGRP_FROZEN, &cgrp->flags);
    }

    spin_lock_irq(&css_set_lock);
    for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
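The added comment explains why the child of a frozen parent can be marked both CGRP_FREEZE and CGRP_FROZEN on creation: it has no tasks yet, so it counts as frozen immediately. A stand-alone sketch of that inheritance with invented flag values and a plain bitmask instead of set_bit() on cgrp->flags:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical flag bits, standing in for CGRP_FREEZE / CGRP_FROZEN */
#define FLAG_FREEZE (1u << 0)
#define FLAG_FROZEN (1u << 1)

struct group {
    unsigned int flags;
    bool e_freeze;  /* effective freeze inherited from the parent */
};

static void group_create(struct group *child, const struct group *parent)
{
    child->flags = 0;
    child->e_freeze = parent->e_freeze;
    if (child->e_freeze) {
        /* the new group has no members yet, so it is frozen immediately */
        child->flags |= FLAG_FREEZE | FLAG_FROZEN;
    }
}

int main(void)
{
    struct group parent = { .e_freeze = true }, child;

    group_create(&child, &parent);
    printf("child flags: %#x\n", child.flags);  /* prints 0x3 */
    return 0;
}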
@@ -413,7 +413,7 @@ static int hw_breakpoint_parse(struct perf_event *bp,

int register_perf_hw_breakpoint(struct perf_event *bp)
{
    struct arch_hw_breakpoint hw;
    struct arch_hw_breakpoint hw = { };
    int err;

    err = reserve_bp_slot(bp);
@@ -461,7 +461,7 @@ int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
                bool check)
{
    struct arch_hw_breakpoint hw;
    struct arch_hw_breakpoint hw = { };
    int err;

    err = hw_breakpoint_parse(bp, attr, &hw);
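Both hunks only change the declaration so the on-stack arch_hw_breakpoint starts out zeroed instead of holding stack garbage; presumably this matters when a later step leaves some fields untouched. A generic illustration of the empty-brace zero-initialization idiom (GNU C, standardized in C23), unrelated to the perf code itself:

#include <stdio.h>

struct settings {
    int address;
    int length;
    int type;
};

int main(void)
{
    struct settings a;          /* members indeterminate until assigned */
    struct settings b = { };    /* every member zero-initialized */

    a.address = 0x1000;         /* a.length and a.type are still unset */
    printf("a.address = %d, b = {%d, %d, %d}\n",
           a.address, b.address, b.length, b.type);
    return 0;
}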
@@ -2338,6 +2338,8 @@ struct mm_struct *copy_init_mm(void)
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 *
 * args->exit_signal is expected to be checked for sanity by the caller.
 */
long _do_fork(struct kernel_clone_args *args)
{
@@ -2562,6 +2564,14 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
    if (copy_from_user(&args, uargs, size))
        return -EFAULT;

    /*
     * Verify that higher 32bits of exit_signal are unset and that
     * it is a valid signal
     */
    if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
             !valid_signal(args.exit_signal)))
        return -EINVAL;

    *kargs = (struct kernel_clone_args){
        .flags = args.flags,
        .pidfd = u64_to_user_ptr(args.pidfd),
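The new check rejects an exit_signal whose upper 32 bits are set or whose low bits are not a valid signal number. A user-space sketch of the same two-part test; the 0xff mask and the 64-signal limit are assumptions of this sketch, standing in for CSIGNAL and valid_signal():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CSIGNAL_MASK 0x000000ffULL  /* assumed: low byte carries the signal */
#define MAX_SIGNAL   64             /* assumed signal-number limit */

static bool exit_signal_ok(uint64_t exit_signal)
{
    /* upper bits must be clear ... */
    if (exit_signal & ~CSIGNAL_MASK)
        return false;
    /* ... and what remains must be a real signal number (0 = none) */
    return exit_signal <= MAX_SIGNAL;
}

int main(void)
{
    int a = exit_signal_ok(17);             /* SIGCHLD: accepted */
    int b = exit_signal_ok(0x100000011ULL); /* high bits set: rejected */
    int c = exit_signal_ok(200);            /* out of range: rejected */

    printf("%d %d %d\n", a, b, c);
    return 0;
}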
@@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg)
        irq = find_first_bit(irqs_resend, nr_irqs);
        clear_bit(irq, irqs_resend);
        desc = irq_to_desc(irq);
        if (!desc)
            continue;
        local_irq_disable();
        desc->handle_irq(desc);
        local_irq_enable();
@@ -5105,37 +5105,40 @@ out_unlock:
    return retval;
}

static int sched_read_attr(struct sched_attr __user *uattr,
               struct sched_attr *attr,
               unsigned int usize)
/*
 * Copy the kernel size attribute structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
            struct sched_attr *kattr,
            unsigned int usize)
{
    int ret;
    unsigned int ksize = sizeof(*kattr);

    if (!access_ok(uattr, usize))
        return -EFAULT;

    /*
     * If we're handed a smaller struct than we know of,
     * ensure all the unknown bits are 0 - i.e. old
     * user-space does not get uncomplete information.
     * sched_getattr() ABI forwards and backwards compatibility:
     *
     * If usize == ksize then we just copy everything to user-space and all is good.
     *
     * If usize < ksize then we only copy as much as user-space has space for,
     * this keeps ABI compatibility as well. We skip the rest.
     *
     * If usize > ksize then user-space is using a newer version of the ABI,
     * which part the kernel doesn't know about. Just ignore it - tooling can
     * detect the kernel's knowledge of attributes from the attr->size value
     * which is set to ksize in this case.
     */
    if (usize < sizeof(*attr)) {
        unsigned char *addr;
        unsigned char *end;
    kattr->size = min(usize, ksize);

        addr = (void *)attr + usize;
        end = (void *)attr + sizeof(*attr);

        for (; addr < end; addr++) {
            if (*addr)
                return -EFBIG;
        }

        attr->size = usize;
    }

    ret = copy_to_user(uattr, attr, attr->size);
    if (ret)
    if (copy_to_user(uattr, kattr, kattr->size))
        return -EFAULT;

    return 0;
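The rewritten comment spells out the three usize/ksize cases. A small stand-alone sketch of the resulting "copy min(usize, ksize) and report it back" pattern, with memcpy standing in for copy_to_user and an invented three-field struct:

#include <stdio.h>
#include <string.h>

/* toy stand-in for struct sched_attr; "size" reports what was copied */
struct attr {
    unsigned int size;
    unsigned int value;
    unsigned int newer_field;   /* imagine older user-space not knowing this */
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

/* copy at most what the caller has room for, and report the copied size */
static void attr_copy_to_user(void *ubuf, unsigned int usize, struct attr *kattr)
{
    unsigned int ksize = sizeof(*kattr);

    kattr->size = min_u(usize, ksize);
    memcpy(ubuf, kattr, kattr->size);
}

int main(void)
{
    struct attr kattr = { .value = 7, .newer_field = 9 };
    struct attr uattr = { 0 };  /* what "user-space" sees */

    /* old user-space: asks for only the first 8 bytes, the rest is skipped */
    attr_copy_to_user(&uattr, 8, &kattr);
    printf("size=%u value=%u newer_field=%u\n",
           uattr.size, uattr.value, uattr.newer_field);  /* 8 7 0 */
    return 0;
}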
@@ -5145,20 +5148,18 @@ static int sched_read_attr(struct sched_attr __user *uattr,
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @size: sizeof(attr) for fwd/bwd comp.
 * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
        unsigned int, size, unsigned int, flags)
        unsigned int, usize, unsigned int, flags)
{
    struct sched_attr attr = {
        .size = sizeof(struct sched_attr),
    };
    struct sched_attr kattr = { };
    struct task_struct *p;
    int retval;

    if (!uattr || pid < 0 || size > PAGE_SIZE ||
        size < SCHED_ATTR_SIZE_VER0 || flags)
    if (!uattr || pid < 0 || usize > PAGE_SIZE ||
        usize < SCHED_ATTR_SIZE_VER0 || flags)
        return -EINVAL;

    rcu_read_lock();
@@ -5171,25 +5172,24 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
    if (retval)
        goto out_unlock;

    attr.sched_policy = p->policy;
    kattr.sched_policy = p->policy;
    if (p->sched_reset_on_fork)
        attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
        kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
    if (task_has_dl_policy(p))
        __getparam_dl(p, &attr);
        __getparam_dl(p, &kattr);
    else if (task_has_rt_policy(p))
        attr.sched_priority = p->rt_priority;
        kattr.sched_priority = p->rt_priority;
    else
        attr.sched_nice = task_nice(p);
        kattr.sched_nice = task_nice(p);

#ifdef CONFIG_UCLAMP_TASK
    attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
    attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
    kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
    kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif

    rcu_read_unlock();

    retval = sched_read_attr(uattr, &attr, size);
    return retval;
    return sched_attr_copy_to_user(uattr, &kattr, usize);

out_unlock:
    rcu_read_unlock();
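Seen from user-space, the compatibility story is carried by the size argument and by the size the kernel writes back into the struct. A hedged usage sketch that calls the raw syscall (glibc has no wrapper) and declares a minimal VER0-style struct locally; the field layout follows the sched_setattr(2) man page, not this diff:

#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Minimal VER0 layout of struct sched_attr, declared locally because glibc
 * ships no wrapper; taken from the sched_setattr(2) man page. */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t  sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
};

int main(void)
{
    struct sched_attr attr = { 0 };

    /* the size argument tells the kernel how much this program knows about */
    if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) != 0) {
        perror("sched_getattr");
        return 1;
    }
    printf("policy %u, nice %d, kernel reported size %u\n",
           attr.sched_policy, attr.sched_nice, attr.size);
    return 0;
}

With the change above, the size written back is min(usize, ksize), so an older binary like this one sees its own 48-byte size rather than the kernel's larger one.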
@@ -4470,6 +4470,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
    if (likely(cfs_rq->runtime_remaining > 0))
        return;

    if (cfs_rq->throttled)
        return;
    /*
     * if we're unable to extend our runtime we resched so that the active
     * hierarchy can be throttled
@@ -4673,6 +4675,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
        if (!cfs_rq_throttled(cfs_rq))
            goto next;

        /* By the above check, this should never be true */
        SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);

        runtime = -cfs_rq->runtime_remaining + 1;
        if (runtime > remaining)
            runtime = remaining;
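In the second hunk, a throttled queue has runtime_remaining <= 0, so -cfs_rq->runtime_remaining + 1 is exactly enough to bring it back above zero, capped by whatever runtime is left to distribute. A tiny stand-alone sketch of that arithmetic, not the scheduler code itself:

#include <stdio.h>

/* Give a throttled queue just enough to reach +1, bounded by the pool. */
static long long grant_runtime(long long runtime_remaining, long long *pool)
{
    long long runtime;

    if (runtime_remaining > 0)  /* not throttled, nothing to hand out */
        return 0;

    runtime = -runtime_remaining + 1;
    if (runtime > *pool)
        runtime = *pool;
    *pool -= runtime;
    return runtime;
}

int main(void)
{
    long long pool = 5;
    long long got;

    got = grant_runtime(-3, &pool);   /* owes 3, wants 4, pool has 5: gets 4 */
    printf("granted %lld, pool now %lld\n", got, pool);
    got = grant_runtime(-10, &pool);  /* owes 10, wants 11, only 1 left: gets 1 */
    printf("granted %lld, pool now %lld\n", got, pool);
    return 0;
}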