Seven hotfixes. Four are cc:stable and the remainder pertain to issues
which were introduced in the current merge window.

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZPd5KAAKCRDdBJ7gKXxA
jqIrAPoCqnQwOA577hJ3B1iEZnbYC0dlf5Rsk+uS/2HFnVeLhAD6A0uFOIE11ZQR
I9AU7NDtu8NYkh9Adz+cRDeLNWbRSAo=
=EFfq
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2023-09-05-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Seven hotfixes. Four are cc:stable and the remainder pertain to
  issues which were introduced in the current merge window"

* tag 'mm-hotfixes-stable-2023-09-05-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  sparc64: add missing initialization of folio in tlb_batch_add()
  mm: memory-failure: use rcu lock instead of tasklist_lock when collect_procs()
  revert "memfd: improve userspace warnings for missing exec-related flags".
  rcu: dump vmalloc memory info safely
  mm/vmalloc: add a safer version of find_vm_area() for debug
  tools/mm: fix undefined reference to pthread_once
  memcontrol: ensure memcg acquired by id is properly set up
commit 3c5c9b7cfd
arch/sparc/mm/tlb.c

@@ -128,6 +128,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                         goto no_cache_flush;
 
                 /* A real file page? */
+                folio = page_folio(page);
                 mapping = folio_flush_mapping(folio);
                 if (!mapping)
                         goto no_cache_flush;
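The hunk above restores the missing folio = page_folio(page) assignment, so folio_flush_mapping(folio) no longer reads an uninitialized local. For illustration only, here is a small userspace sketch of that bug class and its fix; every name in it (demo_folio, flush_mapping, and so on) is invented for the example, and a compiler will usually flag the broken variant under -Wall/-Wuninitialized.

/* uninit_demo.c - build with: cc -Wall -O2 -o uninit_demo uninit_demo.c */
#include <stdio.h>

struct demo_folio { const char *mapping; };

static const char *flush_mapping(const struct demo_folio *folio)
{
        return folio->mapping;
}

/* Buggy shape: the local is read before it is ever assigned. */
static const char *broken(void)
{
        struct demo_folio *folio;        /* never initialized */
        return flush_mapping(folio);     /* undefined behaviour */
}

/* Fixed shape, mirroring the hunk: derive the handle before using it. */
static const char *fixed(struct demo_folio *from_page)
{
        struct demo_folio *folio = from_page;
        return flush_mapping(folio);
}

int main(void)
{
        struct demo_folio f = { "shmem" };

        printf("%s\n", fixed(&f));
        (void)broken;                    /* referenced only to keep -Wall quiet */
        return 0;
}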
mm/filemap.c

@@ -121,9 +121,6 @@
  *    bdi.wb->list_lock         (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
  *    ->private_lock            (zap_pte_range->block_dirty_folio)
- *
- *  ->i_mmap_rwsem
- *    ->tasklist_lock           (memory_failure, collect_procs_ao)
  */
 
 static void page_cache_delete(struct address_space *mapping,
mm/ksm.c (4 lines changed)

@@ -2925,7 +2925,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
                 struct anon_vma *av = rmap_item->anon_vma;
 
                 anon_vma_lock_read(av);
-                read_lock(&tasklist_lock);
+                rcu_read_lock();
                 for_each_process(tsk) {
                         struct anon_vma_chain *vmac;
                         unsigned long addr;
@@ -2944,7 +2944,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
                                 }
                         }
                 }
-                read_unlock(&tasklist_lock);
+                rcu_read_unlock();
                 anon_vma_unlock_read(av);
         }
 }
mm/memcontrol.c

@@ -5326,7 +5326,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
         INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
         memcg->deferred_split_queue.split_queue_len = 0;
 #endif
-        idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
         lru_gen_init_memcg(memcg);
         return memcg;
 fail:
@@ -5398,14 +5397,27 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
         if (alloc_shrinker_info(memcg))
                 goto offline_kmem;
 
-        /* Online state pins memcg ID, memcg ID pins CSS */
-        refcount_set(&memcg->id.ref, 1);
-        css_get(css);
-
         if (unlikely(mem_cgroup_is_root(memcg)))
                 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
                                    FLUSH_TIME);
         lru_gen_online_memcg(memcg);
+
+        /* Online state pins memcg ID, memcg ID pins CSS */
+        refcount_set(&memcg->id.ref, 1);
+        css_get(css);
+
+        /*
+         * Ensure mem_cgroup_from_id() works once we're fully online.
+         *
+         * We could do this earlier and require callers to filter with
+         * css_tryget_online(). But right now there are no users that
+         * need earlier access, and the workingset code relies on the
+         * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
+         * publish it here at the end of onlining. This matches the
+         * regular ID destruction during offlining.
+         */
+        idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+
         return 0;
 offline_kmem:
         memcg_offline_kmem(memcg);
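The two memcontrol hunks move the idr_replace() that publishes a memcg under its ID out of mem_cgroup_alloc() and to the very end of mem_cgroup_css_online(), after refcount_set() and css_get(), so a lookup by ID can no longer hand back a memcg that is still being set up. A minimal userspace C11 sketch of the same initialize-then-publish idea follows; the struct, table, and function names are invented for the example, and the release/acquire pair merely stands in for whatever ordering the real publication step provides.

/* publish_demo.c - build with: cc -std=c11 -Wall -o publish_demo publish_demo.c */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical object that is looked up by ID, analogous to a memcg. */
struct object {
        int id;
        int ready;                      /* stands in for the onlining work */
};

#define MAX_ID 16
static _Atomic(struct object *) table[MAX_ID];  /* stands in for the IDR */

/* Publish only after every field is initialized, like idr_replace() at
 * the end of onlining. */
static void publish(struct object *obj)
{
        obj->ready = 1;                 /* finish initialization first */
        atomic_store_explicit(&table[obj->id], obj, memory_order_release);
}

/* A lookup by ID either finds nothing or a fully initialized object. */
static struct object *lookup(int id)
{
        return atomic_load_explicit(&table[id], memory_order_acquire);
}

int main(void)
{
        struct object *obj = calloc(1, sizeof(*obj));

        obj->id = 3;
        publish(obj);
        printf("id=%d ready=%d\n", lookup(3)->id, lookup(3)->ready);
        free(obj);
        return 0;
}

In the kernel the publication step is the idr_replace() call itself; the point of the patch is simply that it now runs only once the memcg is fully onlined.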
mm/memfd.c

@@ -316,7 +316,7 @@ SYSCALL_DEFINE2(memfd_create,
                 return -EINVAL;
 
         if (!(flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
-                pr_info_ratelimited(
+                pr_warn_once(
                         "%s[%d]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set\n",
                         current->comm, task_pid_nr(current));
         }
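The revert swaps pr_info_ratelimited() back to pr_warn_once(), so the missing-flag message is again emitted a single time per boot instead of on every call subject to rate limiting. A throwaway module-style sketch contrasting the two helpers (hypothetical demo code, not part of the revert):

// SPDX-License-Identifier: GPL-2.0
/* printk_once_demo.c - hypothetical module contrasting the two helpers. */
#include <linux/module.h>
#include <linux/printk.h>

static int __init printk_once_demo_init(void)
{
        int i;

        for (i = 0; i < 5; i++) {
                pr_warn_once("emitted a single time, ever\n");
                pr_info_ratelimited("emitted per call, unless rate limited\n");
        }
        return 0;
}

static void __exit printk_once_demo_exit(void)
{
}

module_init(printk_once_demo_init);
module_exit(printk_once_demo_exit);
MODULE_LICENSE("GPL");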
mm/memory-failure.c

@@ -547,8 +547,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
  * on behalf of the thread group. Return task_struct of the (first found)
  * dedicated thread if found, and return NULL otherwise.
  *
- * We already hold read_lock(&tasklist_lock) in the caller, so we don't
- * have to call rcu_read_lock/unlock() in this function.
+ * We already hold rcu lock in the caller, so we don't have to call
+ * rcu_read_lock/unlock() in this function.
  */
 static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
 {
@@ -609,7 +609,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                 return;
 
         pgoff = page_to_pgoff(page);
-        read_lock(&tasklist_lock);
+        rcu_read_lock();
         for_each_process(tsk) {
                 struct anon_vma_chain *vmac;
                 struct task_struct *t = task_early_kill(tsk, force_early);
@@ -626,7 +626,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                         add_to_kill_anon_file(t, page, vma, to_kill);
                 }
         }
-        read_unlock(&tasklist_lock);
+        rcu_read_unlock();
         anon_vma_unlock_read(av);
 }
 
@@ -642,7 +642,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
         pgoff_t pgoff;
 
         i_mmap_lock_read(mapping);
-        read_lock(&tasklist_lock);
+        rcu_read_lock();
         pgoff = page_to_pgoff(page);
         for_each_process(tsk) {
                 struct task_struct *t = task_early_kill(tsk, force_early);
@@ -662,7 +662,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                         add_to_kill_anon_file(t, page, vma, to_kill);
                 }
         }
-        read_unlock(&tasklist_lock);
+        rcu_read_unlock();
         i_mmap_unlock_read(mapping);
 }
 
@@ -685,7 +685,7 @@ static void collect_procs_fsdax(struct page *page,
         struct task_struct *tsk;
 
         i_mmap_lock_read(mapping);
-        read_lock(&tasklist_lock);
+        rcu_read_lock();
         for_each_process(tsk) {
                 struct task_struct *t = task_early_kill(tsk, true);
 
@@ -696,7 +696,7 @@ static void collect_procs_fsdax(struct page *page,
                         add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
                 }
         }
-        read_unlock(&tasklist_lock);
+        rcu_read_unlock();
         i_mmap_unlock_read(mapping);
 }
 #endif /* CONFIG_FS_DAX */
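Every collect_procs_ksm()/collect_procs_anon()/collect_procs_file()/collect_procs_fsdax() hunk above makes the same substitution: the walk over the task list now runs under rcu_read_lock() rather than read_lock(&tasklist_lock). The task list is RCU-protected, so a read-side critical section is all for_each_process() needs, and memory-failure handling no longer has to hold the global tasklist_lock while it builds its kill list. A hypothetical self-contained test module showing just that read-side pattern (demo code, not part of this series):

// SPDX-License-Identifier: GPL-2.0
/* rcu_tasklist_demo.c - hypothetical module: walk the task list under RCU,
 * the same read-side pattern the collect_procs*() hunks switch to. */
#include <linux/module.h>
#include <linux/sched/signal.h>         /* for_each_process() */
#include <linux/rcupdate.h>

static int __init rcu_tasklist_demo_init(void)
{
        struct task_struct *tsk;
        unsigned int nr = 0;

        rcu_read_lock();                /* instead of read_lock(&tasklist_lock) */
        for_each_process(tsk)
                nr++;
        rcu_read_unlock();

        pr_info("rcu_tasklist_demo: saw %u processes\n", nr);
        return 0;
}

static void __exit rcu_tasklist_demo_exit(void)
{
}

module_init(rcu_tasklist_demo_init);
module_exit(rcu_tasklist_demo_exit);
MODULE_LICENSE("GPL");

A task found this way may exit once the critical section ends, which is why the collect_procs*() paths keep a reference on each task they add to the kill list before the walk finishes.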
mm/util.c

@@ -1068,7 +1068,9 @@ void mem_dump_obj(void *object)
         if (vmalloc_dump_obj(object))
                 return;
 
-        if (virt_addr_valid(object))
+        if (is_vmalloc_addr(object))
+                type = "vmalloc memory";
+        else if (virt_addr_valid(object))
                 type = "non-slab/vmalloc memory";
         else if (object == NULL)
                 type = "NULL pointer";
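mem_dump_obj() now classifies vmalloc addresses explicitly before falling back to virt_addr_valid(): with the safer vmalloc_dump_obj() below allowed to bail out (for instance when it cannot take the lock), a vmalloc address that falls through should still be reported as vmalloc memory rather than as non-slab/vmalloc memory. A hypothetical module-style sketch of that classification order (demo code, not the patched function; the final fallback string is made up):

// SPDX-License-Identifier: GPL-2.0
/* classify_demo.c - hypothetical module: classify a pointer in the same
 * order as the patched mem_dump_obj() fallback, vmalloc first. */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

static const char *classify(const void *p)
{
        if (is_vmalloc_addr(p))
                return "vmalloc memory";
        else if (virt_addr_valid(p))
                return "non-slab/vmalloc memory";
        else if (p == NULL)
                return "NULL pointer";
        return "neither vmalloc nor linear-mapped";
}

static int __init classify_demo_init(void)
{
        void *v = vmalloc(PAGE_SIZE);
        void *k = kmalloc(64, GFP_KERNEL);

        pr_info("vmalloc buffer: %s\n", classify(v));
        pr_info("kmalloc buffer: %s\n", classify(k));
        pr_info("NULL:           %s\n", classify(NULL));

        kfree(k);
        vfree(v);
        return 0;
}

static void __exit classify_demo_exit(void)
{
}

module_init(classify_demo_init);
module_exit(classify_demo_exit);
MODULE_LICENSE("GPL");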
mm/vmalloc.c (26 lines changed)

@@ -4278,14 +4278,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #ifdef CONFIG_PRINTK
 bool vmalloc_dump_obj(void *object)
 {
-        struct vm_struct *vm;
         void *objp = (void *)PAGE_ALIGN((unsigned long)object);
+        const void *caller;
+        struct vm_struct *vm;
+        struct vmap_area *va;
+        unsigned long addr;
+        unsigned int nr_pages;
 
-        vm = find_vm_area(objp);
-        if (!vm)
+        if (!spin_trylock(&vmap_area_lock))
                 return false;
+        va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
+        if (!va) {
+                spin_unlock(&vmap_area_lock);
+                return false;
+        }
+
+        vm = va->vm;
+        if (!vm) {
+                spin_unlock(&vmap_area_lock);
+                return false;
+        }
+        addr = (unsigned long)vm->addr;
+        caller = vm->caller;
+        nr_pages = vm->nr_pages;
+        spin_unlock(&vmap_area_lock);
         pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
-                vm->nr_pages, (unsigned long)vm->addr, vm->caller);
+                nr_pages, addr, caller);
         return true;
 }
 #endif
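The reworked vmalloc_dump_obj() stops calling find_vm_area(), which would spin on vmap_area_lock; it try-locks instead, snapshots the three fields it needs (nr_pages, addr, caller) into locals, drops the lock, and only then prints. That way a dump that fires at an awkward moment, for example while the lock is already held elsewhere, gives up instead of spinning or deadlocking. A small userspace analogue of the trylock-and-snapshot pattern (all names invented for the sketch):

/* trylock_dump_demo.c - build with: cc -pthread -Wall -o demo trylock_dump_demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical shared record protected by a lock, standing in for the
 * vm_struct fields vmalloc_dump_obj() wants to report. */
struct region {
        unsigned int nr_pages;
        unsigned long addr;
        const char *caller;
};

static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;
static struct region current_region = { 4, 0xc0de1000UL, "demo_alloc" };

/* Debug-dump helper: never block on the lock, snapshot under it, and do
 * the potentially slow printing only after unlocking. */
static bool dump_region(void)
{
        struct region snap;

        if (pthread_mutex_trylock(&region_lock) != 0)
                return false;           /* lock busy: give up, do not wait */
        snap = current_region;          /* copy while the data is stable */
        pthread_mutex_unlock(&region_lock);

        printf("%u-page region starting at %#lx allocated at %s\n",
               snap.nr_pages, snap.addr, snap.caller);
        return true;
}

int main(void)
{
        if (!dump_region())
                printf("region lock busy, nothing dumped\n");
        return 0;
}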
tools/mm/Makefile

@@ -8,8 +8,8 @@ TARGETS=page-types slabinfo page_owner_sort
 LIB_DIR = ../lib/api
 LIBS = $(LIB_DIR)/libapi.a
 
-CFLAGS += -Wall -Wextra -I../lib/
-LDFLAGS += $(LIBS)
+CFLAGS += -Wall -Wextra -I../lib/ -pthread
+LDFLAGS += $(LIBS) -pthread
 
 all: $(TARGETS)
 
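The Makefile fix adds -pthread to both the compile flags and the link flags: one of the objects linked into these tools ends up referencing pthread_once, and it is the link step, not just the compile step, that has to pull in the pthread runtime. A minimal standalone example of that kind of reference (not the tools/mm code itself); on some toolchains dropping -pthread from the link line reproduces the "undefined reference to pthread_once" failure named in the shortlog:

/* once_demo.c - build with: cc -pthread -o once_demo once_demo.c */
#include <pthread.h>
#include <stdio.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;

static void do_init(void)
{
        puts("initialized exactly once");
}

int main(void)
{
        pthread_once(&init_once, do_init);
        pthread_once(&init_once, do_init);      /* second call is a no-op */
        return 0;
}

Passing -pthread in CFLAGS affects compilation (on glibc it defines _REENTRANT, for instance), while passing it in LDFLAGS makes the link step pull in the pthread runtime, which is why the hunk touches both variables.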