diff --git a/.mailmap b/.mailmap index a33b9f56357c..5dd318121982 100644 --- a/.mailmap +++ b/.mailmap @@ -13,7 +13,9 @@ Aaron Durbin Abel Vesa Abel Vesa +Abhijeet Dharmapurikar Abhinav Kumar +Ahmad Masri Adam Oldham Adam Radford Adriana Reus @@ -30,6 +32,7 @@ Alexander Mikhalitsyn Alexandre Belloni Alexandre Ghiti +Alexei Avshalom Lazar Alexei Starovoitov Alexei Starovoitov Alexei Starovoitov @@ -37,8 +40,11 @@ Alex Hung Alex Shi Alex Shi Alex Shi +Aloka Dixit Al Viro Al Viro +Amit Blay +Amit Nischal Andi Kleen Andi Shyti Andreas Herrmann @@ -54,6 +60,8 @@ Andrey Ryabinin Andrzej Hajda André Almeida Andy Adamson +Anilkumar Kolli +Anirudh Ghayal Antoine Tenart Antoine Tenart Antonio Ospite @@ -62,9 +70,17 @@ Archit Taneja Ard Biesheuvel Arnaud Patard Arnd Bergmann +Arun Kumar Neelakantam +Ashok Raj Nagarajan +Ashwin Chaugule +Asutosh Das Atish Patra +Avaneesh Kumar Dwivedi Axel Dyks Axel Lin +Balakrishna Godavarthi +Banajit Goswami +Baochen Qiang Baolin Wang Baolin Wang Baolin Wang @@ -93,12 +109,15 @@ Brian Avery Brian King Brian Silverman Cai Huoqing +Can Guo +Carl Huang Changbin Du Changbin Du Chao Yu Chao Yu Chris Chiu Chris Chiu +Chris Lew Christian Borntraeger Christian Borntraeger Christian Borntraeger @@ -119,7 +138,10 @@ Daniel Borkmann Daniel Borkmann Daniel Borkmann David Brownell +David Collins David Woodhouse +Dedy Lansky +Deepak Kumar Singh Dengcheng Zhu Dengcheng Zhu Dengcheng Zhu @@ -136,6 +158,7 @@ Dmitry Safonov <0x7f454c46@gmail.com> Domen Puncer Douglas Gilbert Ed L. Cashin +Elliot Berman Enric Balletbo i Serra Enric Balletbo i Serra Erik Kaneda @@ -148,6 +171,7 @@ Faith Ekstrand Felipe W Damasio Felix Kuhling Felix Moeller +Fenglin Wu Filipe Lautert Finn Thain Franck Bui-Huu @@ -171,8 +195,11 @@ Greg Kurz Gregory CLEMENT Guilherme G. Piccoli Guilherme G. Piccoli +Gokul Sriram Palanisamy +Govindaraj Saminathan Guo Ren Guo Ren +Guru Das Srinagesh Gustavo Padovan Gustavo Padovan Hanjun Guo @@ -190,6 +217,7 @@ Huacai Chen J. Bruce Fields J. Bruce Fields Jacob Shin +Jack Pham Jaegeuk Kim Jaegeuk Kim Jaegeuk Kim @@ -217,10 +245,12 @@ Jayachandran C Jayachandran C Jean Tourrilhes +Jeevan Shriram Jeff Garzik Jeff Layton Jeff Layton Jeff Layton +Jeffrey Hugo Jens Axboe Jens Axboe Jens Axboe @@ -228,6 +258,7 @@ Jens Axboe Jens Osterkamp Jernej Skrabec Jessica Zhang +Jilai Wang Jiri Pirko Jiri Pirko Jiri Pirko @@ -238,6 +269,7 @@ Jiri Slaby Jiri Slaby Jisheng Zhang Jisheng Zhang +Jishnu Prakash Johan Hovold Johan Hovold John Crispin @@ -256,6 +288,7 @@ Jordan Crouse Josh Poimboeuf Josh Poimboeuf +Jouni Malinen Juha Yrjola Juha Yrjola Juha Yrjola @@ -263,6 +296,8 @@ Julien Thierry Iskren Chernev Kalle Valo Kalyan Thota +Karthikeyan Periyasamy +Kathiravan T Kay Sievers Kees Cook Kees Cook @@ -271,6 +306,8 @@ Kees Cook Keith Busch Keith Busch Kenneth W Chen +Kenneth Westfield +Kiran Gunda Kirill Tkhai Konstantin Khlebnikov Konstantin Khlebnikov @@ -279,6 +316,7 @@ Krishna Manikandan Krzysztof Kozlowski Krzysztof Kozlowski Krzysztof Kozlowski +Kshitiz Godara Kuninori Morimoto Kuogee Hsieh Lee Jones @@ -292,19 +330,27 @@ Leonid I Ananiev Leon Romanovsky Leon Romanovsky Leon Romanovsky +Liam Mark Linas Vepstas Linus Lüssing Linus Lüssing Li Yang Li Yang +Lior David Lorenzo Pieralisi Luca Ceresoli Lukasz Luba +Luo Jie Maciej W. Rozycki Maciej W. 
Rozycki +Maharaja Kennadyrajan +Maheshwar Ajja +Malathi Gottam +Manikanta Pubbisetty Manivannan Sadhasivam Manivannan Sadhasivam +Manoj Basapathi Marcin Nowakowski Marc Zyngier Marek Behún @@ -334,6 +380,7 @@ Matt Ranostay Matt Ranostay Matthew Ranostay Matt Ranostay Matt Redfearn +Maulik Shah Mauro Carvalho Chehab Mauro Carvalho Chehab Mauro Carvalho Chehab @@ -346,7 +393,10 @@ Maxim Mikityanskiy Maxime Ripard Maxime Ripard Maxime Ripard +Maya Erez Mayuresh Janorkar +Md Sadre Alam +Miaoqing Pan Michael Buesch Michal Simek Michel Dänzer @@ -357,6 +407,7 @@ Miguel Ojeda Mike Rapoport Mike Rapoport Mike Rapoport +Mike Tipton Miodrag Dinic Miquel Raynal Mitesh shah @@ -365,9 +416,13 @@ Morten Welinder Morten Welinder Morten Welinder Morten Welinder +Mukesh Ojha +Muna Sinada +Murali Nalajala Mythri P K Nadia Yvette Chambers William Lee Irwin III Nathan Chancellor +Neeraj Upadhyay Neil Armstrong Nguyen Anh Quynh Nicholas Piggin @@ -386,6 +441,7 @@ Nikolay Aleksandrov Nikolay Aleksandrov Nikolay Aleksandrov Nikolay Aleksandrov +Odelu Kukatla Oleksandr Natalenko Oleksij Rempel Oleksij Rempel @@ -393,6 +449,7 @@ Oleksij Rempel Oleksij Rempel Oleksij Rempel Oliver Upton +Oza Pawandeep Pali Rohár Paolo 'Blaisorblade' Giarrusso Patrick Mochel @@ -404,11 +461,14 @@ Paul E. McKenney Paul E. McKenney Paul Mackerras Paul Mackerras +Pavankumar Kondeti Peter A Jonsson Peter Oruba Peter Oruba Pratyush Anand Praveen BP +Pradeep Kumar Chitrapu +Prasad Sodagudi Punit Agrawal Qais Yousef Qais Yousef @@ -417,10 +477,16 @@ Quentin Perret Rafael J. Wysocki Rajeev Nandan Rajendra Nayak +Rajeshwari Ravindra Kamble +Raju P.L.S.S.S.N Rajesh Shah +Rakesh Pillai Ralf Baechle Ralf Wildenhues +Ram Chandra Jangir Randy Dunlap +Ravi Kumar Bokka +Ravi Kumar Siddojigari Rémi Denis-Courmont Ricardo Ribalda Ricardo Ribalda Ricardo Ribalda Delgado @@ -429,6 +495,7 @@ Richard Leitner Richard Leitner Richard Leitner Robert Foss +Rocky Liao Roman Gushchin Roman Gushchin Roman Gushchin @@ -446,24 +513,35 @@ Santosh Shilimkar Santosh Shilimkar Sarangdhar Joshi Sascha Hauer +Sahitya Tummala +Sathishkumar Muruganandam Satya Priya S.Çağlar Onur +Sayali Lokhande Sean Christopherson Sean Nyekjaer +Sean Tranchetti Sebastian Reichel Sebastian Reichel Sedat Dilek +Senthilkumar N L Seth Forshee Shannon Nelson Shannon Nelson Shannon Nelson +Sharath Chandra Vurukala Shiraz Hashim Shuah Khan Shuah Khan Shuah Khan Shuah Khan +Sibi Sankar +Sid Manning Simon Arlott Simon Kelley +Sricharan Ramabadhran +Srinivas Ramana +Sriram R Stéphane Witzmann Stephen Hemminger Stephen Hemminger @@ -471,22 +549,30 @@ Stephen Hemminger Stephen Hemminger Steve Wise Steve Wise -Subash Abhinov Kasiviswanathan +Subash Abhinov Kasiviswanathan +Subbaraman Narayanamurthy Subhash Jadavani +Sudarshan Rajagopalan Sudeep Holla Sudeep KarkadaNagesha Sumit Semwal +Surabhi Vishnoi Takashi YOSHII +Tamizh Chelvam Raja +Taniya Das Tejun Heo Thomas Graf Thomas Körper Thomas Pedersen Tiezhu Yang +Tingwei Zhang +Tirupathi Reddy Tobias Klauser Tobias Klauser Tobias Klauser Tobias Klauser Todor Tomov Tony Luck +Trilok Soni TripleX Chung TripleX Chung Tsuneo Yoshioka @@ -499,11 +585,17 @@ Uwe Kleine-König Uwe Kleine-König Uwe Kleine-König Valdis Kletnieks +Vara Reddy +Varadarajan Narayanan +Vasanthakumar Thiagarajan Vasily Averin Vasily Averin Vasily Averin Vasily Averin Valentin Schneider +Veera Sundaram Sankaran +Veerabhadrarao Badiganti +Venkateswara Naralasetty Vikash Garodia Vinod Koul Vinod Koul @@ -513,11 +605,14 @@ Viresh Kumar Viresh Kumar Viresh Kumar Viresh Kumar +Vivek 
Aknurwar Vivien Didelot Vlad Dogaru Vladimir Davydov Vladimir Davydov WeiXiong Liao +Wen Gong +Wesley Cheng Will Deacon Wolfram Sang Wolfram Sang
diff --git a/Documentation/filesystems/tmpfs.rst b/Documentation/filesystems/tmpfs.rst
index f18f46be5c0c..2cd8fa332feb 100644
--- a/Documentation/filesystems/tmpfs.rst
+++ b/Documentation/filesystems/tmpfs.rst
@@ -84,8 +84,6 @@ nr_inodes  The maximum number of inodes for this instance. The default
            is half of the number of your physical RAM pages, or (on a
            machine with highmem) the number of lowmem RAM pages,
            whichever is the lower.
-noswap     Disables swap. Remounts must respect the original settings.
-           By default swap is enabled.
 =========  ============================================================
 
 These parameters accept a suffix k, m or g for kilo, mega and giga and
@@ -99,36 +97,31 @@ mount with such options, since it allows any user with write access to
 use up all the memory on the machine; but enhances the scalability of
 that instance in a system with many CPUs making intensive use of it.
 
+tmpfs blocks may be swapped out, when there is a shortage of memory.
+tmpfs has a mount option to disable its use of swap:
+
+====== ===========================================================
+noswap Disables swap. Remounts must respect the original settings.
+       By default swap is enabled.
+====== ===========================================================
+
 tmpfs also supports Transparent Huge Pages which requires a kernel
 configured with CONFIG_TRANSPARENT_HUGEPAGE and with huge supported for
 your system (has_transparent_hugepage(), which is architecture specific).
 The mount options for this are:
 
-====== ============================================================
-huge=0 never: disables huge pages for the mount
-huge=1 always: enables huge pages for the mount
-huge=2 within_size: only allocate huge pages if the page will be
-       fully within i_size, also respect fadvise()/madvise() hints.
-huge=3 advise: only allocate huge pages if requested with
-       fadvise()/madvise()
-====== ============================================================
+================ ==============================================================
+huge=never       Do not allocate huge pages. This is the default.
+huge=always      Attempt to allocate huge page every time a new page is needed.
+huge=within_size Only allocate huge page if it will be fully within i_size.
+                 Also respect madvise(2) hints.
+huge=advise      Only allocate huge page if requested with madvise(2).
+================ ==============================================================
 
-There is a sysfs file which you can also use to control system wide THP
-configuration for all tmpfs mounts, the file is:
-
-/sys/kernel/mm/transparent_hugepage/shmem_enabled
-
-This sysfs file is placed on top of THP sysfs directory and so is registered
-by THP code. It is however only used to control all tmpfs mounts with one
-single knob. Since it controls all tmpfs mounts it should only be used either
-for emergency or testing purposes. The values you can set for shmem_enabled are:
-
-== ============================================================
--1 deny: disables huge on shm_mnt and all mounts, for
-   emergency use
--2 force: enables huge on shm_mnt and all mounts, w/o needing
-   option, for testing
-== ============================================================
+See also Documentation/admin-guide/mm/transhuge.rst, which describes the
+sysfs file /sys/kernel/mm/transparent_hugepage/shmem_enabled: which can
+be used to deny huge pages on all tmpfs mounts in an emergency, or to
+force huge pages on all tmpfs mounts for testing.
 
 tmpfs has a mount option to set the NUMA memory allocation policy for
 all files in that instance (if CONFIG_NUMA is enabled) - which can be
diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c
index 37d60e72cf26..9e71794839e8 100644
--- a/arch/um/os-Linux/sigio.c
+++ b/arch/um/os-Linux/sigio.c
@@ -3,7 +3,6 @@
  * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  */
 
-#include
 #include
 #include
 #include
@@ -51,7 +50,7 @@ static struct pollfds all_sigio_fds;
 
 static int write_sigio_thread(void *unused)
 {
-	struct pollfds *fds;
+	struct pollfds *fds, tmp;
 	struct pollfd *p;
 	int i, n, respond_fd;
 	char c;
@@ -78,7 +77,9 @@ static int write_sigio_thread(void *unused)
 					       "write_sigio_thread : "
 					       "read on socket failed, "
 					       "err = %d\n", errno);
-				swap(current_poll, next_poll);
+				tmp = current_poll;
+				current_poll = next_poll;
+				next_poll = tmp;
 				respond_fd = sigio_private[1];
 			}
 			else {
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index cb80a7703d58..1fb213f379a5 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -132,7 +132,7 @@ ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
 			 u64 *ppos, bool encrypted)
 {
 	unsigned long pfn, offset;
-	size_t nr_bytes;
+	ssize_t nr_bytes;
 	ssize_t read = 0, tmp;
 	int idx;
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2dd73e4f3d8e..406ab9ea818f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -641,8 +641,14 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
  */
 static inline bool vma_start_read(struct vm_area_struct *vma)
 {
-	/* Check before locking. A race might cause false locked result. */
-	if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
+	/*
+	 * Check before locking. A race might cause false locked result.
+	 * We can use READ_ONCE() for the mm_lock_seq here, and don't need
+	 * ACQUIRE semantics, because this is just a lockless check whose result
+	 * we don't rely on for anything - the mm_lock_seq read against which we
+	 * need ordering is below.
+	 */
+	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
 		return false;
 
 	if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
@@ -653,8 +659,13 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
 	 * False unlocked result is impossible because we modify and check
 	 * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
 	 * modification invalidates all existing locks.
+	 *
+	 * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
+	 * racing with vma_end_write_all(), we only start reading from the VMA
+	 * after it has been unlocked.
+	 * This pairs with RELEASE semantics in vma_end_write_all().
 	 */
-	if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) {
+	if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
 		up_read(&vma->vm_lock->lock);
 		return false;
 	}
@@ -676,7 +687,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
 	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
 	 * mm->mm_lock_seq can't be concurrently modified.
 	 */
-	*mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+	*mm_lock_seq = vma->vm_mm->mm_lock_seq;
 	return (vma->vm_lock_seq == *mm_lock_seq);
 }
 
@@ -688,7 +699,13 @@ static inline void vma_start_write(struct vm_area_struct *vma)
 		return;
 
 	down_write(&vma->vm_lock->lock);
-	vma->vm_lock_seq = mm_lock_seq;
+	/*
+	 * We should use WRITE_ONCE() here because we can have concurrent reads
+	 * from the early lockless pessimistic check in vma_start_read().
+	 * We don't really care about the correctness of that early check, but
+	 * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
+	 */
+	WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
 	up_write(&vma->vm_lock->lock);
 }
 
@@ -702,7 +719,7 @@ static inline bool vma_try_start_write(struct vm_area_struct *vma)
 	if (!down_write_trylock(&vma->vm_lock->lock))
 		return false;
 
-	vma->vm_lock_seq = mm_lock_seq;
+	WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
 	up_write(&vma->vm_lock->lock);
 	return true;
 }
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index de10fc797c8e..5e74ce4a28cd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -514,6 +514,20 @@ struct vm_area_struct {
 	};
 
 #ifdef CONFIG_PER_VMA_LOCK
+	/*
+	 * Can only be written (using WRITE_ONCE()) while holding both:
+	 *  - mmap_lock (in write mode)
+	 *  - vm_lock->lock (in write mode)
+	 * Can be read reliably while holding one of:
+	 *  - mmap_lock (in read or write mode)
+	 *  - vm_lock->lock (in read or write mode)
+	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
+	 * while holding nothing (except RCU to keep the VMA struct allocated).
+	 *
+	 * This sequence counter is explicitly allowed to overflow; sequence
+	 * counter reuse can only lead to occasional unnecessary use of the
+	 * slowpath.
+	 */
 	int vm_lock_seq;
 	struct vma_lock *vm_lock;
 
@@ -679,6 +693,20 @@ struct mm_struct {
 					  * by mmlist_lock
 					  */
 #ifdef CONFIG_PER_VMA_LOCK
+		/*
+		 * This field has lock-like semantics, meaning it is sometimes
+		 * accessed with ACQUIRE/RELEASE semantics.
+		 * Roughly speaking, incrementing the sequence number is
+		 * equivalent to releasing locks on VMAs; reading the sequence
+		 * number can be part of taking a read lock on a VMA.
+		 *
+		 * Can be modified under write mmap_lock using RELEASE
+		 * semantics.
+		 * Can be read with no other protection when holding write
+		 * mmap_lock.
+		 * Can be read with ACQUIRE semantics if not holding write
+		 * mmap_lock.
+		 */
 		int mm_lock_seq;
 #endif
 
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index aab8f1b28d26..e05e167dbd16 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -76,8 +76,14 @@ static inline void mmap_assert_write_locked(struct mm_struct *mm)
 static inline void vma_end_write_all(struct mm_struct *mm)
 {
 	mmap_assert_write_locked(mm);
-	/* No races during update due to exclusive mmap_lock being held */
-	WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
+	/*
+	 * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
+	 * mmap_lock being held.
+	 * We need RELEASE semantics here to ensure that preceding stores into
+	 * the VMA take effect before we unlock it with this store.
+	 * Pairs with ACQUIRE semantics in vma_start_read().
+	 */
+	smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
 }
 #else
 static inline void vma_end_write_all(struct mm_struct *mm) {}
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
index c11210124344..bb07721909e1 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/core-test.h
@@ -320,25 +320,25 @@ static void damon_test_update_monitoring_result(struct kunit *test)
 
 static void damon_test_set_attrs(struct kunit *test)
 {
-	struct damon_ctx ctx;
+	struct damon_ctx *c = damon_new_ctx();
 	struct damon_attrs valid_attrs = {
 		.min_nr_regions = 10, .max_nr_regions = 1000,
 		.sample_interval = 5000, .aggr_interval = 100000,};
 	struct damon_attrs invalid_attrs;
 
-	KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &valid_attrs), 0);
+	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);
 
 	invalid_attrs = valid_attrs;
 	invalid_attrs.min_nr_regions = 1;
-	KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL);
+	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
 
 	invalid_attrs = valid_attrs;
 	invalid_attrs.max_nr_regions = 9;
-	KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL);
+	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
 
 	invalid_attrs = valid_attrs;
 	invalid_attrs.aggr_interval = 4999;
-	KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL);
+	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
 }
 
 static struct kunit_case damon_test_cases[] = {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e245191e6b04..ece5d481b5ff 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2487,7 +2487,7 @@ int unpoison_memory(unsigned long pfn)
 		goto unlock_mutex;
 	}
 
-	if (!folio_test_hwpoison(folio)) {
+	if (!PageHWPoison(p)) {
 		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
 				 pfn, &unpoison_rs);
 		goto unlock_mutex;
diff --git a/mm/mmap.c b/mm/mmap.c
index 3eda23c9ebe7..3937479d0e07 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -615,6 +615,7 @@ static inline int dup_anon_vma(struct vm_area_struct *dst,
 	 * anon pages imported.
 	 */
 	if (src->anon_vma && !dst->anon_vma) {
+		vma_start_write(dst);
 		dst->anon_vma = src->anon_vma;
 		return anon_vma_clone(dst, src);
 	}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 64437105fe0d..2022333805d3 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -48,8 +48,11 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	if (walk->no_vma) {
 		/*
 		 * pte_offset_map() might apply user-specific validation.
+		 * Indeed, on x86_64 the pmd entries set up by init_espfix_ap()
+		 * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear),
+		 * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them.
 		 */
-		if (walk->mm == &init_mm)
+		if (walk->mm == &init_mm || addr >= TASK_SIZE)
 			pte = pte_offset_kernel(pmd, addr);
 		else
 			pte = pte_offset_map(pmd, addr);
diff --git a/mm/shmem.c b/mm/shmem.c
index 2f2e0e618072..f5af4b943e42 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2796,7 +2796,8 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 		if (*ppos >= i_size_read(inode))
 			break;
 
-		error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, SGP_READ);
+		error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
+					SGP_READ);
 		if (error) {
 			if (error == -EINVAL)
 				error = 0;
@@ -2805,7 +2806,9 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 
 		if (folio) {
 			folio_unlock(folio);
-			if (folio_test_hwpoison(folio)) {
+			if (folio_test_hwpoison(folio) ||
+			    (folio_test_large(folio) &&
+			     folio_test_has_hwpoisoned(folio))) {
 				error = -EIO;
 				break;
 			}
@@ -2841,7 +2844,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 			folio_put(folio);
 			folio = NULL;
 		} else {
-			n = splice_zeropage_into_pipe(pipe, *ppos, len);
+			n = splice_zeropage_into_pipe(pipe, *ppos, part);
 		}
 
 		if (!n)
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index fc7ba95e86a0..855c4863124b 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -1541,7 +1541,6 @@ temeprature||temperature
 temorary||temporary
 temproarily||temporarily
 temperture||temperature
-thead||thread
 theads||threads
 therfore||therefore
 thier||their