9 hotfixes.  5 are cc:stable, 4 either pertain to post-6.10 material or
aren't considered necessary for earlier kernels.  5 are MM and 4 are
non-MM.  No identifiable theme here - please see the individual
changelogs.
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZrQhyAAKCRDdBJ7gKXxA
jvLLAP46sQ/HspAbx+5JoeKBTiX6XW4Hfd+MAk++EaTAyAhnxQD+Mfq7rPOIHm/G
wiXPVvLO8FEx0lbq06rnXvdotaWFrQg=
=mlE4
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2024-08-07-18-32' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Nine hotfixes.  Five are cc:stable, the others either pertain to
  post-6.10 material or aren't considered necessary for earlier
  kernels.  Five are MM and four are non-MM.  No identifiable theme
  here - please see the individual changelogs"

* tag 'mm-hotfixes-stable-2024-08-07-18-32' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  padata: Fix possible divide-by-0 panic in padata_mt_helper()
  mailmap: update entry for David Heidelberg
  memcg: protect concurrent access to mem_cgroup_idr
  mm: shmem: fix incorrect aligned index when checking conflicts
  mm: shmem: avoid allocating huge pages larger than MAX_PAGECACHE_ORDER for shmem
  mm: list_lru: fix UAF for memory cgroup
  kcov: properly check for softirq context
  MAINTAINERS: Update LTP members and web
  selftests: mm: add s390 to ARCH check
commit 660e4b18a7

diff --git a/.mailmap b/.mailmap
@@ -166,6 +166,7 @@ Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
 David Brownell <david-b@pacbell.net>
 David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
+David Heidelberg <david@ixit.cz> <d.okias@gmail.com>
 David Rheinsberg <david@readahead.eu> <dh.herrmann@gmail.com>
 David Rheinsberg <david@readahead.eu> <dh.herrmann@googlemail.com>
 David Rheinsberg <david@readahead.eu> <david.rheinsberg@gmail.com>
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -13324,14 +13324,16 @@ F: Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt
 F: drivers/i2c/muxes/i2c-mux-ltc4306.c

 LTP (Linux Test Project)
+M: Andrea Cervesato <andrea.cervesato@suse.com>
 M: Cyril Hrubis <chrubis@suse.cz>
 M: Jan Stancek <jstancek@redhat.com>
 M: Petr Vorel <pvorel@suse.cz>
 M: Li Wang <liwang@redhat.com>
 M: Yang Xu <xuyang2018.jy@fujitsu.com>
+M: Xiao Yang <yangx.jy@fujitsu.com>
 L: ltp@lists.linux.it (subscribers-only)
 S: Maintained
-W: http://linux-test-project.github.io/
+W: https://linux-test-project.readthedocs.io/
 T: git https://github.com/linux-test-project/ltp.git

 LTR390 AMBIENT/UV LIGHT SENSOR DRIVER
diff --git a/kernel/kcov.c b/kernel/kcov.c
@@ -161,6 +161,15 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
 	kmsan_unpoison_memory(&area->list, sizeof(area->list));
 }

+/*
+ * Unlike in_serving_softirq(), this function returns false when called during
+ * a hardirq or an NMI that happened in the softirq context.
+ */
+static inline bool in_softirq_really(void)
+{
+	return in_serving_softirq() && !in_hardirq() && !in_nmi();
+}
+
 static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
 	unsigned int mode;
@@ -170,7 +179,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
 	 * so we ignore code executed in interrupts, unless we are in a remote
 	 * coverage collection section in a softirq.
 	 */
-	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
+	if (!in_task() && !(in_softirq_really() && t->kcov_softirq))
 		return false;
 	mode = READ_ONCE(t->kcov_mode);
 	/*
@@ -849,7 +858,7 @@ void kcov_remote_start(u64 handle)

 	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
 		return;
-	if (!in_task() && !in_serving_softirq())
+	if (!in_task() && !in_softirq_really())
 		return;

 	local_lock_irqsave(&kcov_percpu_data.lock, flags);
@@ -991,7 +1000,7 @@ void kcov_remote_stop(void)
 	int sequence;
 	unsigned long flags;

-	if (!in_task() && !in_serving_softirq())
+	if (!in_task() && !in_softirq_really())
 		return;

 	local_lock_irqsave(&kcov_percpu_data.lock, flags);
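The new in_softirq_really() helper is the core of this fix: in_serving_softirq() alone stays true while a hardirq or NMI that interrupted the softirq is running, so kcov could mistake such interrupt code for softirq-context coverage. A minimal user-space sketch of the same predicate logic, with plain booleans standing in for the kernel's preempt-count queries (illustrative only, not kcov code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's context queries. */
struct ctx {
	bool serving_softirq;	/* in_serving_softirq() */
	bool hardirq;		/* in_hardirq() */
	bool nmi;		/* in_nmi() */
};

/* Mirrors the predicate above: "really" in softirq only when no
 * hardirq or NMI is stacked on top of it. */
static bool in_softirq_really(const struct ctx *c)
{
	return c->serving_softirq && !c->hardirq && !c->nmi;
}

int main(void)
{
	struct ctx softirq = { .serving_softirq = true };
	struct ctx nested  = { .serving_softirq = true, .hardirq = true };

	printf("plain softirq:        %d\n", in_softirq_really(&softirq)); /* 1 */
	printf("hardirq atop softirq: %d\n", in_softirq_really(&nested));  /* 0 */
	return 0;
}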
diff --git a/kernel/padata.c b/kernel/padata.c
@@ -517,6 +517,13 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
 	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
 	ps.chunk_size = roundup(ps.chunk_size, job->align);

+	/*
+	 * chunk_size can be 0 if the caller sets min_chunk to 0. So force it
+	 * to at least 1 to prevent divide-by-0 panic in padata_mt_helper().
+	 */
+	if (!ps.chunk_size)
+		ps.chunk_size = 1U;
+
 	list_for_each_entry(pw, &works, pw_list)
 		if (job->numa_aware) {
 			int old_node = atomic_read(&last_used_nid);
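padata_mt_helper() divides the job into chunk_size-sized pieces, so a zero chunk_size (possible when a caller passes min_chunk == 0 and the job is small) means a divide-by-0. A small user-space sketch of the clamp; the split heuristic and function names are illustrative, not the kernel's actual padata arithmetic:

#include <stdio.h>

/* Round x up to a multiple of align (align must be non-zero here;
 * the kernel's roundup() additionally expects sane alignments). */
static unsigned long roundup_ul(unsigned long x, unsigned long align)
{
	return ((x + align - 1) / align) * align;
}

static unsigned long compute_chunks(unsigned long job_size,
				    unsigned long min_chunk,
				    unsigned long align,
				    unsigned long nworks)
{
	unsigned long chunk_size = job_size / (4 * nworks); /* split heuristic */

	if (chunk_size < min_chunk)
		chunk_size = min_chunk;
	chunk_size = roundup_ul(chunk_size, align);

	/* Without this clamp, a small job plus min_chunk == 0 leaves
	 * chunk_size == 0 and the division below faults. */
	if (!chunk_size)
		chunk_size = 1;

	return (job_size + chunk_size - 1) / chunk_size; /* needs chunk_size > 0 */
}

int main(void)
{
	/* 3 units of work, min_chunk 0, align 1, 8 workers: the naive
	 * split yields chunk_size 0; the clamp makes it 1. */
	printf("%lu chunks\n", compute_chunks(3, 0, 1, 8));
	return 0;
}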
diff --git a/mm/list_lru.c b/mm/list_lru.c
@@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 }
 #endif /* CONFIG_MEMCG */

+/* The caller must ensure the memcg lifetime. */
 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 		    struct mem_cgroup *memcg)
 {
@@ -109,14 +110,22 @@ EXPORT_SYMBOL_GPL(list_lru_add);

 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;

-	return list_lru_add(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_add(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_add_obj);

+/* The caller must ensure the memcg lifetime. */
 bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
 		    struct mem_cgroup *memcg)
 {
@@ -139,11 +148,18 @@ EXPORT_SYMBOL_GPL(list_lru_del);

 bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;

-	return list_lru_del(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_del(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_del_obj);

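The use-after-free being fixed: the old code resolved the object's memcg with mem_cgroup_from_slab_obj() outside any protection, so the memcg could be freed before list_lru_add()/list_lru_del() dereferenced it. The RCU read-side section now covers both the lookup and the use. A user-space analogue of that lifetime rule, with a reader-writer lock standing in for rcu_read_lock() (all names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t lifetime_lock = PTHREAD_RWLOCK_INITIALIZER;
static int *shared_obj; /* stands in for the mem_cgroup pointer */

/* Lookup and use must sit inside one read-side critical section;
 * splitting them is exactly the window the patch closes. */
static int use_obj_safely(void)
{
	int val = -1;

	pthread_rwlock_rdlock(&lifetime_lock);
	if (shared_obj)		/* lookup ... */
		val = *shared_obj;	/* ... and use, under one read lock */
	pthread_rwlock_unlock(&lifetime_lock);
	return val;
}

static void free_obj(void)
{
	pthread_rwlock_wrlock(&lifetime_lock); /* waits for readers */
	free(shared_obj);
	shared_obj = NULL;
	pthread_rwlock_unlock(&lifetime_lock);
}

int main(void)
{
	shared_obj = malloc(sizeof(*shared_obj));
	*shared_obj = 42;
	printf("read %d\n", use_obj_safely());
	free_obj();
	printf("read %d\n", use_obj_safely()); /* -1: object gone, no UAF */
	return 0;
}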
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
@@ -3386,11 +3386,28 @@ static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)

 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
 static DEFINE_IDR(mem_cgroup_idr);
+static DEFINE_SPINLOCK(memcg_idr_lock);
+
+static int mem_cgroup_alloc_id(void)
+{
+	int ret;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&memcg_idr_lock);
+	ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
+			GFP_NOWAIT);
+	spin_unlock(&memcg_idr_lock);
+	idr_preload_end();
+	return ret;
+}

 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
 {
 	if (memcg->id.id > 0) {
+		spin_lock(&memcg_idr_lock);
 		idr_remove(&mem_cgroup_idr, memcg->id.id);
+		spin_unlock(&memcg_idr_lock);
+
 		memcg->id.id = 0;
 	}
 }
@@ -3524,8 +3541,7 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 	if (!memcg)
 		return ERR_PTR(error);

-	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
-				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
+	memcg->id.id = mem_cgroup_alloc_id();
 	if (memcg->id.id < 0) {
 		error = memcg->id.id;
 		goto fail;
@@ -3667,7 +3683,9 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
 	 * publish it here at the end of onlining. This matches the
 	 * regular ID destruction during offlining.
 	 */
+	spin_lock(&memcg_idr_lock);
 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+	spin_unlock(&memcg_idr_lock);

 	return 0;
 offline_kmem:
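mem_cgroup_idr was previously written concurrently — idr_alloc() at memcg allocation, idr_replace() at online, idr_remove() at release — without serialization, and the IDR is not safe for concurrent writers. The fix funnels every writer through memcg_idr_lock, with idr_preload() done outside the lock so the GFP_NOWAIT allocation inside it can draw from preloaded nodes. A user-space analogue of the serialized-ID-table pattern (a flat array plus a mutex standing in for the IDR and spinlock; illustrative only):

#include <pthread.h>
#include <stdio.h>

#define ID_MAX 64

static void *table[ID_MAX + 1];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocate the lowest free id >= 1, or -1 if full. Every mutation of
 * the table happens under table_lock, so no writer races another. */
static int table_alloc_id(void *placeholder)
{
	int id, ret = -1;

	pthread_mutex_lock(&table_lock);
	for (id = 1; id <= ID_MAX; id++) {
		if (!table[id]) {
			table[id] = placeholder;
			ret = id;
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return ret;
}

static void table_remove(int id)
{
	pthread_mutex_lock(&table_lock);
	table[id] = NULL;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	int marker; /* stands in for the memcg pointer published at online */
	int id = table_alloc_id(&marker);

	printf("got id %d\n", id);
	if (id > 0)
		table_remove(id);
	return 0;
}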
diff --git a/mm/shmem.c b/mm/shmem.c
@@ -1629,11 +1629,6 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
 	unsigned long vm_flags = vma->vm_flags;
-	/*
-	 * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that
-	 * are enabled for this vma.
-	 */
-	unsigned long orders = BIT(PMD_ORDER + 1) - 1;
 	loff_t i_size;
 	int order;

@@ -1678,7 +1673,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	if (global_huge)
 		mask |= READ_ONCE(huge_shmem_orders_inherit);

-	return orders & mask;
+	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
 }

 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
@@ -1686,6 +1681,7 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
 					   unsigned long orders)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	pgoff_t aligned_index;
 	unsigned long pages;
 	int order;

@@ -1697,9 +1693,9 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
 		order = highest_order(orders);
 		while (orders) {
 			pages = 1UL << order;
-			index = round_down(index, pages);
-			if (!xa_find(&mapping->i_pages, &index,
-					index + pages - 1, XA_PRESENT))
+			aligned_index = round_down(index, pages);
+			if (!xa_find(&mapping->i_pages, &aligned_index,
+					aligned_index + pages - 1, XA_PRESENT))
 				break;
 			order = next_order(&orders, order);
 		}
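The aligned-index change matters because the conflict-check loop walks orders from highest to lowest: rounding index down in place clobbers the caller's fault index, so every later iteration rounds the already-rounded value and can test the wrong page range. A minimal sketch of the difference; the numbers and the two-order loop are illustrative:

#include <stdio.h>

/* round x down to a multiple of mult */
static unsigned long round_down_ul(unsigned long x, unsigned long mult)
{
	return x - (x % mult);
}

int main(void)
{
	unsigned long index = 21;          /* original fault index */
	unsigned long orders[] = { 4, 2 }; /* try a 16-page, then a 4-page range */

	/* buggy: clobbers index; the second pass rounds 16 instead of 21 */
	unsigned long idx = index;
	for (int i = 0; i < 2; i++) {
		idx = round_down_ul(idx, 1UL << orders[i]);
		printf("buggy  order %lu -> base %lu\n", orders[i], idx);
	}

	/* fixed: keep the original index, round into a scratch variable */
	for (int i = 0; i < 2; i++) {
		unsigned long aligned_index = round_down_ul(index, 1UL << orders[i]);
		printf("fixed  order %lu -> base %lu\n", orders[i], aligned_index);
	}
	return 0; /* buggy checks base 16 at order 2; the right base is 20 */
}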
|
@ -110,7 +110,7 @@ endif
|
|||||||
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64))
|
ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64 s390))
|
||||||
TEST_GEN_FILES += va_high_addr_switch
|
TEST_GEN_FILES += va_high_addr_switch
|
||||||
TEST_GEN_FILES += virtual_address_range
|
TEST_GEN_FILES += virtual_address_range
|
||||||
TEST_GEN_FILES += write_to_hugetlbfs
|
TEST_GEN_FILES += write_to_hugetlbfs
|
||||||