mm: use fallthrough;

Convert the various /* fallthrough */ comments to the pseudo-keyword
fallthrough;

Done via script:
https://lore.kernel.org/lkml/b56602fcf79f849e733e7b521bb0e17895d390fa.1582230379.git.joe@perches.com/

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
Link: http://lkml.kernel.org/r/f62fea5d10eb0ccfc05d87c242a620c261219b66.camel@perches.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
e46b893dd1
commit
e4a9bc5896
2
mm/gup.c
2
mm/gup.c
@@ -1102,7 +1102,7 @@ retry:
|
||||
goto retry;
|
||||
case -EBUSY:
|
||||
ret = 0;
|
||||
/* FALLTHRU */
|
||||
fallthrough;
|
||||
case -EFAULT:
|
||||
case -ENOMEM:
|
||||
case -EHWPOISON:
|
||||
|
@@ -467,14 +467,14 @@ static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
|
||||
switch (MEMFILE_ATTR(cft->private)) {
|
||||
case RES_RSVD_USAGE:
|
||||
counter = &h_cg->rsvd_hugepage[idx];
|
||||
/* Fall through. */
|
||||
fallthrough;
|
||||
case RES_USAGE:
|
||||
val = (u64)page_counter_read(counter);
|
||||
seq_printf(seq, "%llu\n", val * PAGE_SIZE);
|
||||
break;
|
||||
case RES_RSVD_LIMIT:
|
||||
counter = &h_cg->rsvd_hugepage[idx];
|
||||
/* Fall through. */
|
||||
fallthrough;
|
||||
case RES_LIMIT:
|
||||
val = (u64)counter->max;
|
||||
if (val == limit)
|
||||
@@ -514,7 +514,7 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
|
||||
switch (MEMFILE_ATTR(of_cft(of)->private)) {
|
||||
case RES_RSVD_LIMIT:
|
||||
rsvd = true;
|
||||
/* Fall through. */
|
||||
fallthrough;
|
||||
case RES_LIMIT:
|
||||
mutex_lock(&hugetlb_limit_mutex);
|
||||
ret = page_counter_set_max(
|
||||
|
3
mm/ksm.c
3
mm/ksm.c
@@ -2813,8 +2813,7 @@ static int ksm_memory_callback(struct notifier_block *self,
|
||||
*/
|
||||
ksm_check_stable_tree(mn->start_pfn,
|
||||
mn->start_pfn + mn->nr_pages);
|
||||
/* fallthrough */
|
||||
|
||||
fallthrough;
|
||||
case MEM_CANCEL_OFFLINE:
|
||||
mutex_lock(&ksm_thread_mutex);
|
||||
ksm_run &= ~KSM_RUN_OFFLINE;
|
||||
|
@@ -223,7 +223,7 @@ restart:
|
||||
switch (ret) {
|
||||
case LRU_REMOVED_RETRY:
|
||||
assert_spin_locked(&nlru->lock);
|
||||
/* fall through */
|
||||
fallthrough;
|
||||
case LRU_REMOVED:
|
||||
isolated++;
|
||||
nlru->nr_items--;
|
||||
|
@@ -5813,7 +5813,7 @@ retry:
|
||||
switch (get_mctgt_type(vma, addr, ptent, &target)) {
|
||||
case MC_TARGET_DEVICE:
|
||||
device = true;
|
||||
/* fall through */
|
||||
fallthrough;
|
||||
case MC_TARGET_PAGE:
|
||||
page = target.page;
|
||||
/*
|
||||
|
@@ -881,7 +881,6 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
|
||||
|
||||
switch (p->mode) {
|
||||
case MPOL_BIND:
|
||||
/* Fall through */
|
||||
case MPOL_INTERLEAVE:
|
||||
*nodes = p->v.nodes;
|
||||
break;
|
||||
@@ -2066,7 +2065,6 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
|
||||
break;
|
||||
|
||||
case MPOL_BIND:
|
||||
/* Fall through */
|
||||
case MPOL_INTERLEAVE:
|
||||
*mask = mempolicy->v.nodes;
|
||||
break;
|
||||
@@ -2333,7 +2331,6 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
|
||||
|
||||
switch (a->mode) {
|
||||
case MPOL_BIND:
|
||||
/* Fall through */
|
||||
case MPOL_INTERLEAVE:
|
||||
return !!nodes_equal(a->v.nodes, b->v.nodes);
|
||||
case MPOL_PREFERRED:
|
||||
|
@@ -1460,7 +1460,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
|
||||
* with MAP_SHARED to preserve backward compatibility.
|
||||
*/
|
||||
flags &= LEGACY_MAP_MASK;
|
||||
/* fall through */
|
||||
fallthrough;
|
||||
case MAP_SHARED_VALIDATE:
|
||||
if (flags & ~flags_mask)
|
||||
return -EOPNOTSUPP;
|
||||
@@ -1487,8 +1487,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
|
||||
vm_flags |= VM_SHARED | VM_MAYSHARE;
|
||||
if (!(file->f_mode & FMODE_WRITE))
|
||||
vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
|
||||
|
||||
/* fall through */
|
||||
fallthrough;
|
||||
case MAP_PRIVATE:
|
||||
if (!(file->f_mode & FMODE_READ))
|
||||
return -EACCES;
|
||||
|
@@ -3996,7 +3996,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
|
||||
if (i_size >= HPAGE_PMD_SIZE &&
|
||||
i_size >> PAGE_SHIFT >= off)
|
||||
return true;
|
||||
/* fall through */
|
||||
fallthrough;
|
||||
case SHMEM_HUGE_ADVISE:
|
||||
/* TODO: implement fadvise() hints */
|
||||
return (vma->vm_flags & VM_HUGEPAGE);
|
||||
|
@@ -424,7 +424,7 @@ static void *zs_zpool_map(void *pool, unsigned long handle,
|
||||
case ZPOOL_MM_WO:
|
||||
zs_mm = ZS_MM_WO;
|
||||
break;
|
||||
case ZPOOL_MM_RW: /* fall through */
|
||||
case ZPOOL_MM_RW:
|
||||
default:
|
||||
zs_mm = ZS_MM_RW;
|
||||
break;
|
||||
|
Loading…
Reference in New Issue
Block a user