Merge tag 'mm-hotfixes-stable-2024-08-07-18-32' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Nine hotfixes. Five are cc:stable, the others either pertain to
  post-6.10 material or aren't considered necessary for earlier
  kernels. Five are MM and four are non-MM.

  No identifiable theme here - please see the individual changelogs"

* tag 'mm-hotfixes-stable-2024-08-07-18-32' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  padata: Fix possible divide-by-0 panic in padata_mt_helper()
  mailmap: update entry for David Heidelberg
  memcg: protect concurrent access to mem_cgroup_idr
  mm: shmem: fix incorrect aligned index when checking conflicts
  mm: shmem: avoid allocating huge pages larger than MAX_PAGECACHE_ORDER for shmem
  mm: list_lru: fix UAF for memory cgroup
  kcov: properly check for softirq context
  MAINTAINERS: Update LTP members and web
  selftests: mm: add s390 to ARCH check
commit 660e4b18a7

--- a/.mailmap
+++ b/.mailmap
@@ -166,6 +166,7 @@ Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
 David Brownell <david-b@pacbell.net>
 David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
+David Heidelberg <david@ixit.cz> <d.okias@gmail.com>
 David Rheinsberg <david@readahead.eu> <dh.herrmann@gmail.com>
 David Rheinsberg <david@readahead.eu> <dh.herrmann@googlemail.com>
 David Rheinsberg <david@readahead.eu> <david.rheinsberg@gmail.com>
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13324,14 +13324,16 @@ F:	Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt
 F:	drivers/i2c/muxes/i2c-mux-ltc4306.c
 
 LTP (Linux Test Project)
+M:	Andrea Cervesato <andrea.cervesato@suse.com>
 M:	Cyril Hrubis <chrubis@suse.cz>
 M:	Jan Stancek <jstancek@redhat.com>
 M:	Petr Vorel <pvorel@suse.cz>
 M:	Li Wang <liwang@redhat.com>
 M:	Yang Xu <xuyang2018.jy@fujitsu.com>
+M:	Xiao Yang <yangx.jy@fujitsu.com>
 L:	ltp@lists.linux.it (subscribers-only)
 S:	Maintained
-W:	http://linux-test-project.github.io/
+W:	https://linux-test-project.readthedocs.io/
 T:	git https://github.com/linux-test-project/ltp.git
 
 LTR390 AMBIENT/UV LIGHT SENSOR DRIVER
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -161,6 +161,15 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
 	kmsan_unpoison_memory(&area->list, sizeof(area->list));
 }
 
+/*
+ * Unlike in_serving_softirq(), this function returns false when called during
+ * a hardirq or an NMI that happened in the softirq context.
+ */
+static inline bool in_softirq_really(void)
+{
+	return in_serving_softirq() && !in_hardirq() && !in_nmi();
+}
+
 static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
 	unsigned int mode;
@@ -170,7 +179,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 	 * so we ignore code executed in interrupts, unless we are in a remote
 	 * coverage collection section in a softirq.
 	 */
-	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
+	if (!in_task() && !(in_softirq_really() && t->kcov_softirq))
 		return false;
 	mode = READ_ONCE(t->kcov_mode);
 	/*
@@ -849,7 +858,7 @@ void kcov_remote_start(u64 handle)
 
 	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
 		return;
-	if (!in_task() && !in_serving_softirq())
+	if (!in_task() && !in_softirq_really())
 		return;
 
 	local_lock_irqsave(&kcov_percpu_data.lock, flags);
@@ -991,7 +1000,7 @@ void kcov_remote_stop(void)
 	int sequence;
 	unsigned long flags;
 
-	if (!in_task() && !in_serving_softirq())
+	if (!in_task() && !in_softirq_really())
 		return;
 
 	local_lock_irqsave(&kcov_percpu_data.lock, flags);
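
Note: the bare in_serving_softirq() check was insufficient because the softirq bit in preempt_count stays set while a hardirq or NMI preempts a softirq handler, so kcov could take the softirq coverage path from hardirq/NMI context. A minimal userspace sketch of the new predicate (mocked context flags and illustrative values, not kernel code):

    /* Userspace mock of the three kernel context predicates. */
    #include <stdbool.h>
    #include <stdio.h>

    struct ctx { bool serving_softirq, hardirq, nmi; };

    static bool in_softirq_really(struct ctx c)
    {
            return c.serving_softirq && !c.hardirq && !c.nmi;
    }

    int main(void)
    {
            struct ctx softirq         = { true, false, false };
            struct ctx hardirq_preempt = { true, true,  false }; /* hardirq over softirq */
            struct ctx nmi_preempt     = { true, false, true  }; /* NMI over softirq */

            /* The old check (serving_softirq alone) reports 1 for all three. */
            printf("softirq handler:      %d\n", in_softirq_really(softirq));         /* 1 */
            printf("hardirq over softirq: %d\n", in_softirq_really(hardirq_preempt)); /* 0 */
            printf("NMI over softirq:     %d\n", in_softirq_really(nmi_preempt));     /* 0 */
            return 0;
    }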
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -517,6 +517,13 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
 	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
 	ps.chunk_size = roundup(ps.chunk_size, job->align);
 
+	/*
+	 * chunk_size can be 0 if the caller sets min_chunk to 0. So force it
+	 * to at least 1 to prevent divide-by-0 panic in padata_mt_helper().
+	 */
+	if (!ps.chunk_size)
+		ps.chunk_size = 1U;
+
 	list_for_each_entry(pw, &works, pw_list)
 		if (job->numa_aware) {
 			int old_node = atomic_read(&last_used_nid);
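
Note: as the added comment says, padata_mt_helper() later uses ps.chunk_size as a divisor, so a zero chunk size panics. The value can reach zero when a caller passes min_chunk == 0 and the job is small. A userspace sketch of the failing arithmetic (roundup() reimplemented from the kernel macro; the size/nworks/align values are hypothetical, and the real divisor also includes a load-balance factor):

    #include <stdio.h>

    #define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned long size = 2, nworks = 4, min_chunk = 0, align = 4;
            unsigned long chunk;

            chunk = size / nworks;                          /* 2 / 4 == 0 */
            chunk = chunk > min_chunk ? chunk : min_chunk;  /* max(): still 0 */
            chunk = roundup(chunk, align);                  /* roundup(0, 4) == 0 */

            /* Without the fix, the helper divides by this zero.
             * The fix clamps it before the worker threads start: */
            if (!chunk)
                    chunk = 1;

            printf("chunk_size = %lu\n", chunk);            /* 1 */
            return 0;
    }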
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 }
 #endif /* CONFIG_MEMCG */
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 		  struct mem_cgroup *memcg)
 {
@@ -109,14 +110,22 @@ EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;
 
-	return list_lru_add(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_add(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_add_obj);
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
 		  struct mem_cgroup *memcg)
 {
@@ -139,11 +148,18 @@ EXPORT_SYMBOL_GPL(list_lru_del);
 
 bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;
 
-	return list_lru_del(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_del(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_del_obj);
 
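
Note: mem_cgroup_from_slab_obj() returns a pointer that is only guaranteed to stay alive under RCU, since memcg destruction is RCU-deferred. A sketch of the window the patch closes (illustrative interleaving, not code from the patch):

    /*
     * CPU0: list_lru_add_obj()                  CPU1: memcg offline/free
     *
     *   memcg = mem_cgroup_from_slab_obj(item);
     *                                             memcg torn down and freed
     *                                             after an RCU grace period
     *   list_lru_add(lru, item, nid, memcg);    <-- use-after-free
     *
     * Holding rcu_read_lock() across the lookup and the add keeps the
     * grace period from completing in between, so the memcg stays valid.
     * Callers of the raw list_lru_add()/list_lru_del() now carry the
     * documented burden of ensuring the memcg lifetime themselves.
     */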
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3386,11 +3386,28 @@ static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
 
 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
 static DEFINE_IDR(mem_cgroup_idr);
+static DEFINE_SPINLOCK(memcg_idr_lock);
+
+static int mem_cgroup_alloc_id(void)
+{
+	int ret;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&memcg_idr_lock);
+	ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
+			GFP_NOWAIT);
+	spin_unlock(&memcg_idr_lock);
+	idr_preload_end();
+	return ret;
+}
 
 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
 {
 	if (memcg->id.id > 0) {
+		spin_lock(&memcg_idr_lock);
 		idr_remove(&mem_cgroup_idr, memcg->id.id);
+		spin_unlock(&memcg_idr_lock);
+
 		memcg->id.id = 0;
 	}
 }
@@ -3524,8 +3541,7 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 	if (!memcg)
 		return ERR_PTR(error);
 
-	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
-				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
+	memcg->id.id = mem_cgroup_alloc_id();
 	if (memcg->id.id < 0) {
 		error = memcg->id.id;
 		goto fail;
@@ -3667,7 +3683,9 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
 	 * publish it here at the end of onlining. This matches the
 	 * regular ID destruction during offlining.
 	 */
+	spin_lock(&memcg_idr_lock);
 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+	spin_unlock(&memcg_idr_lock);
 
 	return 0;
 offline_kmem:
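
Note: idr_alloc(), idr_remove() and idr_replace() require external serialization; the IDR itself only protects lookups via RCU. The fix adopts the standard preload pattern so the allocation can run under a spinlock without sleeping. A generic sketch of that pattern (my_idr, my_lock, my_alloc_id and MAX_ID are placeholder names, not from this patch):

    /* Preload with a sleeping allocation outside the lock, then allocate
     * with GFP_NOWAIT while holding the lock that also covers remove/replace. */
    static DEFINE_IDR(my_idr);
    static DEFINE_SPINLOCK(my_lock);

    static int my_alloc_id(void *ptr)
    {
            int id;

            idr_preload(GFP_KERNEL);        /* may sleep: fills per-cpu cache */
            spin_lock(&my_lock);
            /* GFP_NOWAIT is safe here: memory was preloaded outside the lock. */
            id = idr_alloc(&my_idr, ptr, 1, MAX_ID + 1, GFP_NOWAIT);
            spin_unlock(&my_lock);
            idr_preload_end();

            return id;
    }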
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1629,11 +1629,6 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
 	unsigned long vm_flags = vma->vm_flags;
-	/*
-	 * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that
-	 * are enabled for this vma.
-	 */
-	unsigned long orders = BIT(PMD_ORDER + 1) - 1;
 	loff_t i_size;
 	int order;
 
@@ -1678,7 +1673,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	if (global_huge)
 		mask |= READ_ONCE(huge_shmem_orders_inherit);
 
-	return orders & mask;
+	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
 }
 
 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
@@ -1686,6 +1681,7 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
 				   unsigned long orders)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	pgoff_t aligned_index;
 	unsigned long pages;
 	int order;
 
@@ -1697,9 +1693,9 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
 	order = highest_order(orders);
 	while (orders) {
 		pages = 1UL << order;
-		index = round_down(index, pages);
-		if (!xa_find(&mapping->i_pages, &index,
-				index + pages - 1, XA_PRESENT))
+		aligned_index = round_down(index, pages);
+		if (!xa_find(&mapping->i_pages, &aligned_index,
+				aligned_index + pages - 1, XA_PRESENT))
 			break;
 		order = next_order(&orders, order);
 	}
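
Note: the scratch variable matters because the old loop wrote the rounded-down value back into index, so after the first (largest) order found a conflict, every smaller order tested a window derived from the previous alignment rather than from the faulting index. A worked example with a hypothetical index:

    /*
     * Faulting index = 5; the conflict check tries order 4 (pages = 16),
     * then falls back to order 2 (pages = 4):
     *
     *   old: index = round_down(5, 16) = 0;   conflict -> try next order
     *        index = round_down(0, 4)  = 0;   checks slots [0, 3]  (wrong window)
     *   new: aligned_index = round_down(5, 16) = 0;  conflict -> next order
     *        aligned_index = round_down(5, 4)  = 4;  checks slots [4, 7]
     */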
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -110,7 +110,7 @@ endif
 
 endif
 
-ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64))
+ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64 s390))
 TEST_GEN_FILES += va_high_addr_switch
 TEST_GEN_FILES += virtual_address_range
 TEST_GEN_FILES += write_to_hugetlbfs