Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "14 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  rapidio/tsi721: fix incorrect detection of address translation condition
  rapidio/documentation/mport_cdev: add missing parameter description
  kernel/fork: fix CLONE_CHILD_CLEARTID regression in nscd
  MAINTAINERS: Vladimir has moved
  mm, mempolicy: task->mempolicy must be NULL before dropping final reference
  printk/nmi: avoid direct printk()-s from __printk_nmi_flush()
  treewide: remove references to the now unnecessary DEFINE_PCI_DEVICE_TABLE
  drivers/scsi/wd719x.c: remove last declaration using DEFINE_PCI_DEVICE_TABLE
  mm, vmscan: only allocate and reclaim from zones with pages managed by the buddy allocator
  lib/test_hash.c: fix warning in preprocessor symbol evaluation
  lib/test_hash.c: fix warning in two-dimensional array init
  kconfig: tinyconfig: provide whole choice blocks to avoid warnings
  kexec: fix double-free when failing to relocate the purgatory
  mm, oom: prevent premature OOM killer invocation for high order request
commit b9677faf45
Linus Torvalds, 2016-09-01 18:23:22 -07:00

21 changed files with 116 additions and 124 deletions

diff --git a/.mailmap b/.mailmap

@@ -158,6 +158,8 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>

diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt

@@ -124,7 +124,6 @@ initialization with a pointer to a structure describing the driver
 The ID table is an array of struct pci_device_id entries ending with an
 all-zero entry.  Definitions with static const are generally preferred.
-Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided.
 
 Each entry consists of:
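
The paragraph above translates directly into code. A minimal sketch of the preferred form, using a hypothetical table name (the WD vendor/device pair from the wd719x hunk below is reused purely for illustration); MODULE_DEVICE_TABLE() is the usual companion macro for module autoloading:

	#include <linux/module.h>
	#include <linux/pci.h>

	/* static const array of struct pci_device_id, all-zero terminated */
	static const struct pci_device_id example_pci_table[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
		{ }	/* terminator */
	};
	MODULE_DEVICE_TABLE(pci, example_pci_table);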

diff --git a/Documentation/rapidio/mport_cdev.txt b/Documentation/rapidio/mport_cdev.txt

@@ -80,6 +80,10 @@ functionality of their platform when planning to use this driver:
 III. Module parameters
 
+- 'dma_timeout' - DMA transfer completion timeout (in msec, default value 3000).
+  This parameter set a maximum completion wait time for SYNC mode DMA
+  transfer requests and for RIO_WAIT_FOR_ASYNC ioctl requests.
+
 - 'dbg_level' - This parameter allows to control amount of debug information
   generated by this device driver. This parameter is formed by set of
   bit masks that correspond to the specific functional blocks.

diff --git a/MAINTAINERS b/MAINTAINERS

@@ -3247,7 +3247,7 @@ F:	kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:	Johannes Weiner <hannes@cmpxchg.org>
 M:	Michal Hocko <mhocko@kernel.org>
-M:	Vladimir Davydov <vdavydov@virtuozzo.com>
+M:	Vladimir Davydov <vdavydov.dev@gmail.com>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained

diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config

@@ -1 +1,3 @@
 CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set

diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c

@@ -1161,7 +1161,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
 		} else if (ibw_start < (ib_win->rstart + ib_win->size) &&
 			   (ibw_start + ibw_size) > ib_win->rstart) {
 			/* Return error if address translation involved */
-			if (direct && ib_win->xlat) {
+			if (!direct || ib_win->xlat) {
 				ret = -EFAULT;
 				break;
 			}
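
The one-line fix above inverts the sense of the overlap check. A hedged restatement of the corrected predicate as stand-alone C, with hypothetical names (overlap_is_error, window_translates) chosen for clarity: an overlap with an existing inbound window is tolerable only when the new request is a direct mapping and the existing window performs no address translation; the old test wrongly accepted any overlap where the new request was non-direct.

	#include <stdbool.h>

	/* fixed condition: error out unless (direct && !window_translates) */
	static bool overlap_is_error(bool direct, bool window_translates)
	{
		return !direct || window_translates;
	}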

diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c

@@ -962,7 +962,7 @@ static void wd719x_pci_remove(struct pci_dev *pdev)
 	scsi_host_put(sh);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
+static const struct pci_device_id wd719x_pci_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
 	{}
 };

diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h

@@ -195,6 +195,7 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
 }
 
 extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+extern void mpol_put_task_policy(struct task_struct *);
 
 #else
@@ -297,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
 	return -1; /* no node preference */
 }
 
+static inline void mpol_put_task_policy(struct task_struct *task)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h

@@ -828,9 +828,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  */
 #define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)
 
-static inline int populated_zone(struct zone *zone)
+/*
+ * Returns true if a zone has pages managed by the buddy allocator.
+ * All the reclaim decisions have to use this function rather than
+ * populated_zone(). If the whole zone is reserved then we can easily
+ * end up with populated_zone() && !managed_zone().
+ */
+static inline bool managed_zone(struct zone *zone)
 {
-	return (!!zone->present_pages);
+	return zone->managed_pages;
+}
+
+/* Returns true if a zone has memory */
+static inline bool populated_zone(struct zone *zone)
+{
+	return zone->present_pages;
 }
 
 extern int movable_zone;
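
The new helper matters because present_pages counts pages physically present in the zone while managed_pages counts only those handed to the buddy allocator, so a fully reserved zone is populated yet has nothing for reclaim to scan. A minimal userspace sketch of the distinction, with zone_example as a simplified, hypothetical stand-in for struct zone:

	#include <stdbool.h>
	#include <stdio.h>

	struct zone_example {
		unsigned long present_pages;	/* pages physically present */
		unsigned long managed_pages;	/* pages under the buddy allocator */
	};

	static bool managed(const struct zone_example *z)
	{
		return z->managed_pages != 0;
	}

	static bool populated(const struct zone_example *z)
	{
		return z->present_pages != 0;
	}

	int main(void)
	{
		/* all pages reserved at boot: populated but not managed */
		struct zone_example reserved = { 4096, 0 };

		printf("populated=%d managed=%d\n",
		       populated(&reserved), managed(&reserved));
		return 0;
	}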

diff --git a/include/linux/pci.h b/include/linux/pci.h

@@ -682,15 +682,6 @@ struct pci_driver {
 
 #define	to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
 
-/**
- * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
- * @_table: device table name
- *
- * This macro is deprecated and should not be used in new code.
- */
-#define DEFINE_PCI_DEVICE_TABLE(_table) \
-	const struct pci_device_id _table[]
-
 /**
  * PCI_DEVICE - macro used to describe a specific pci device
  * @vend: the 16 bit PCI Vendor ID

diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config

@@ -1,4 +1,12 @@
+# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
 CONFIG_KERNEL_XZ=y
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_KERNEL_LZ4 is not set
 CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_SLAB is not set
+# CONFIG_SLUB is not set
 CONFIG_SLOB=y

diff --git a/kernel/exit.c b/kernel/exit.c

@@ -848,12 +848,7 @@ void do_exit(long code)
 	TASKS_RCU(preempt_enable());
 	exit_notify(tsk, group_dead);
 	proc_exit_connector(tsk);
-#ifdef CONFIG_NUMA
-	task_lock(tsk);
-	mpol_put(tsk->mempolicy);
-	tsk->mempolicy = NULL;
-	task_unlock(tsk);
-#endif
+	mpol_put_task_policy(tsk);
 #ifdef CONFIG_FUTEX
 	if (unlikely(current->pi_state_cache))
 		kfree(current->pi_state_cache);

diff --git a/kernel/fork.c b/kernel/fork.c

@@ -936,14 +936,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 	deactivate_mm(tsk, mm);
 
 	/*
-	 * If we're exiting normally, clear a user-space tid field if
-	 * requested.  We leave this alone when dying by signal, to leave
-	 * the value intact in a core dump, and to save the unnecessary
-	 * trouble, say, a killed vfork parent shouldn't touch this mm.
-	 * Userland only wants this done for a sys_exit.
+	 * Signal userspace if we're not exiting with a core dump
+	 * because we want to leave the value intact for debugging
+	 * purposes.
 	 */
 	if (tsk->clear_child_tid) {
-		if (!(tsk->flags & PF_SIGNALED) &&
+		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
 		    atomic_read(&mm->mm_users) > 1) {
 			/*
 			 * We don't check the error code - if userspace has
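
A hedged restatement of the changed condition, with a hypothetical helper name: the child-tid clear (and the futex wakeup that nscd depends on) is now suppressed only while the thread group is dumping core, not on any signal-induced exit.

	#include <stdbool.h>

	/* old: notify iff !PF_SIGNALED; new: notify iff no group core dump */
	static bool should_clear_child_tid(bool group_coredump, int mm_users)
	{
		return !group_coredump && mm_users > 1;
	}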

diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c

@@ -887,7 +887,10 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
 	return 0;
 out:
 	vfree(pi->sechdrs);
+	pi->sechdrs = NULL;
+
 	vfree(pi->purgatory_buf);
+	pi->purgatory_buf = NULL;
 	return ret;
 }
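
The fix is the familiar NULL-after-free idiom. A minimal userspace sketch, with purgatory_info_example as a hypothetical stand-in, of why clearing the pointers makes a second pass through the error path harmless (free(), like the kernel's vfree(), is a no-op on NULL):

	#include <stdlib.h>

	struct purgatory_info_example {
		void *sechdrs;
		void *purgatory_buf;
	};

	static void cleanup(struct purgatory_info_example *pi)
	{
		free(pi->sechdrs);
		pi->sechdrs = NULL;		/* later cleanup frees NULL: no-op */
		free(pi->purgatory_buf);
		pi->purgatory_buf = NULL;
	}

	int main(void)
	{
		struct purgatory_info_example pi = {
			.sechdrs = malloc(64),
			.purgatory_buf = malloc(64),
		};

		cleanup(&pi);
		cleanup(&pi);	/* previously a double-free */
		return 0;
	}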

diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c

@@ -99,26 +99,32 @@ again:
 	return add;
 }
 
-/*
- * printk one line from the temporary buffer from @start index until
- * and including the @end index.
- */
-static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
+static void printk_nmi_flush_line(const char *text, int len)
 {
-	const char *buf = s->buffer + start;
-
 	/*
 	 * The buffers are flushed in NMI only on panic. The messages must
 	 * go only into the ring buffer at this stage. Consoles will get
 	 * explicitly called later when a crashdump is not generated.
 	 */
 	if (in_nmi())
-		printk_deferred("%.*s", (end - start) + 1, buf);
+		printk_deferred("%.*s", len, text);
 	else
-		printk("%.*s", (end - start) + 1, buf);
+		printk("%.*s", len, text);
 }
 
+/*
+ * printk one line from the temporary buffer from @start index until
+ * and including the @end index.
+ */
+static void printk_nmi_flush_seq_line(struct nmi_seq_buf *s,
+					int start, int end)
+{
+	const char *buf = s->buffer + start;
+
+	printk_nmi_flush_line(buf, (end - start) + 1);
+}
+
 /*
  * Flush data from the associated per_CPU buffer. The function
  * can be called either via IRQ work or independently.
@@ -150,9 +156,11 @@ more:
 	 * the buffer an unexpected way. If we printed something then
 	 * @len must only increase.
 	 */
-	if (i && i >= len)
-		pr_err("printk_nmi_flush: internal error: i=%d >= len=%zu\n",
-		       i, len);
+	if (i && i >= len) {
+		const char *msg = "printk_nmi_flush: internal error\n";
+
+		printk_nmi_flush_line(msg, strlen(msg));
+	}
 
 	if (!len)
 		goto out; /* Someone else has already flushed the buffer. */
@@ -166,14 +174,14 @@ more:
 	/* Print line by line. */
 	for (; i < size; i++) {
 		if (s->buffer[i] == '\n') {
-			print_nmi_seq_line(s, last_i, i);
+			printk_nmi_flush_seq_line(s, last_i, i);
 			last_i = i + 1;
 		}
 	}
 	/* Check if there was a partial line. */
 	if (last_i < size) {
-		print_nmi_seq_line(s, last_i, size - 1);
-		pr_cont("\n");
+		printk_nmi_flush_seq_line(s, last_i, size - 1);
+		printk_nmi_flush_line("\n", strlen("\n"));
 	}
 
 	/*
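
The point of the refactoring is that every byte now funnels through the single emitter printk_nmi_flush_line(), so the flush path can no longer bypass the NMI-safe path via pr_err()/pr_cont(). A minimal userspace sketch of the line-splitting loop, with printf() standing in for printk_deferred()/printk():

	#include <stdio.h>
	#include <string.h>

	static void flush_line(const char *text, int len)
	{
		printf("%.*s", len, text);	/* single choke point */
	}

	static void flush_buffer(const char *buf, int size)
	{
		int i, last_i = 0;

		for (i = 0; i < size; i++) {
			if (buf[i] == '\n') {
				flush_line(buf + last_i, i - last_i + 1);
				last_i = i + 1;
			}
		}
		if (last_i < size) {		/* trailing partial line */
			flush_line(buf + last_i, size - last_i);
			flush_line("\n", 1);
		}
	}

	int main(void)
	{
		const char msg[] = "first\nsecond\npartial";

		flush_buffer(msg, (int)strlen(msg));
		return 0;
	}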

diff --git a/lib/test_hash.c b/lib/test_hash.c

@@ -143,7 +143,7 @@ static int __init
 test_hash_init(void)
 {
 	char buf[SIZE+1];
-	u32 string_or = 0, hash_or[2][33] = { 0 };
+	u32 string_or = 0, hash_or[2][33] = { { 0, } };
 	unsigned tests = 0;
 	unsigned long long h64 = 0;
 	int i, j;
@@ -219,21 +219,27 @@ test_hash_init(void)
 	}
 
 	/* Issue notices about skipped tests. */
-#ifndef HAVE_ARCH__HASH_32
-	pr_info("__hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH__HASH_32 != 1
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1
 	pr_info("__hash_32() is arch-specific; not compared to generic.");
 #endif
-#ifndef HAVE_ARCH_HASH_32
-	pr_info("hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_32 != 1
+#else
+	pr_info("__hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_32
+#if HAVE_ARCH_HASH_32 != 1
 	pr_info("hash_32() is arch-specific; not compared to generic.");
 #endif
-#ifndef HAVE_ARCH_HASH_64
-	pr_info("hash_64() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_64 != 1
+#else
+	pr_info("hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_64
+#if HAVE_ARCH_HASH_64 != 1
 	pr_info("hash_64() is arch-specific; not compared to generic.");
 #endif
+#else
+	pr_info("hash_64() has no arch implementation to test.");
+#endif
 
 	pr_notice("%u tests passed.", tests);
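
Both fixes silence stock GCC warnings. A minimal sketch of the initializer one, compilable with gcc -Wmissing-braces and using hypothetical variable names: for a two-dimensional array, { 0 } elides the inner braces and warns, while the fully braced form stays quiet.

	#include <stdio.h>

	int main(void)
	{
		/* mirrors u32 hash_or[2][33] in test_hash_init() */
		unsigned elided[2][33] = { 0 };		/* warns: missing braces */
		unsigned braced[2][33] = { { 0, } };	/* quiet */

		printf("%u %u\n", elided[0][0], braced[0][0]);
		return 0;
	}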

diff --git a/mm/mempolicy.c b/mm/mempolicy.c

@@ -2336,6 +2336,23 @@ out:
 	return ret;
 }
 
+/*
+ * Drop the (possibly final) reference to task->mempolicy. It needs to be
+ * dropped after task->mempolicy is set to NULL so that any allocation done as
+ * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
+ * policy.
+ */
+void mpol_put_task_policy(struct task_struct *task)
+{
+	struct mempolicy *pol;
+
+	task_lock(task);
+	pol = task->mempolicy;
+	task->mempolicy = NULL;
+	task_unlock(task);
+	mpol_put(pol);
+}
+
 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 {
 	pr_debug("deleting %lx-l%lx\n", n->start, n->end);

diff --git a/mm/page_alloc.c b/mm/page_alloc.c

@@ -3137,54 +3137,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	return NULL;
 }
 
-static inline bool
-should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
-		     enum compact_result compact_result,
-		     enum compact_priority *compact_priority,
-		     int compaction_retries)
-{
-	int max_retries = MAX_COMPACT_RETRIES;
-
-	if (!order)
-		return false;
-
-	/*
-	 * compaction considers all the zone as desperately out of memory
-	 * so it doesn't really make much sense to retry except when the
-	 * failure could be caused by insufficient priority
-	 */
-	if (compaction_failed(compact_result)) {
-		if (*compact_priority > MIN_COMPACT_PRIORITY) {
-			(*compact_priority)--;
-			return true;
-		}
-		return false;
-	}
-
-	/*
-	 * make sure the compaction wasn't deferred or didn't bail out early
-	 * due to locks contention before we declare that we should give up.
-	 * But do not retry if the given zonelist is not suitable for
-	 * compaction.
-	 */
-	if (compaction_withdrawn(compact_result))
-		return compaction_zonelist_suitable(ac, order, alloc_flags);
-
-	/*
-	 * !costly requests are much more important than __GFP_REPEAT
-	 * costly ones because they are de facto nofail and invoke OOM
-	 * killer to move on while costly can fail and users are ready
-	 * to cope with that. 1/4 retries is rather arbitrary but we
-	 * would need much more detailed feedback from compaction to
-	 * make a better decision.
-	 */
-	if (order > PAGE_ALLOC_COSTLY_ORDER)
-		max_retries /= 4;
-
-	if (compaction_retries <= max_retries)
-		return true;
-
-	return false;
-}
 #else
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
@@ -3195,6 +3147,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	return NULL;
 }
 
+#endif /* CONFIG_COMPACTION */
+
 static inline bool
 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
 		     enum compact_result compact_result,
@@ -3221,7 +3175,6 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
 	}
 	return false;
 }
-#endif /* CONFIG_COMPACTION */
 
 /* Perform direct synchronous page reclaim */
 static int
@@ -4407,7 +4360,7 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
 	do {
 		zone_type--;
 		zone = pgdat->node_zones + zone_type;
-		if (populated_zone(zone)) {
+		if (managed_zone(zone)) {
 			zoneref_set_zone(zone,
 				&zonelist->_zonerefs[nr_zones++]);
 			check_highest_zone(zone_type);
@@ -4645,7 +4598,7 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
 		for (j = 0; j < nr_nodes; j++) {
 			node = node_order[j];
 			z = &NODE_DATA(node)->node_zones[zone_type];
-			if (populated_zone(z)) {
+			if (managed_zone(z)) {
 				zoneref_set_zone(z,
 					&zonelist->_zonerefs[pos++]);
 				check_highest_zone(zone_type);

diff --git a/mm/vmscan.c b/mm/vmscan.c

@@ -1665,7 +1665,7 @@ static bool inactive_reclaimable_pages(struct lruvec *lruvec,
 	for (zid = sc->reclaim_idx; zid >= 0; zid--) {
 		zone = &pgdat->node_zones[zid];
 
-		if (!populated_zone(zone))
+		if (!managed_zone(zone))
 			continue;
 
 		if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
@@ -2036,7 +2036,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 		struct zone *zone = &pgdat->node_zones[zid];
 		unsigned long inactive_zone, active_zone;
 
-		if (!populated_zone(zone))
+		if (!managed_zone(zone))
 			continue;
 
 		inactive_zone = zone_page_state(zone,
@@ -2171,7 +2171,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 		for (z = 0; z < MAX_NR_ZONES; z++) {
 			struct zone *zone = &pgdat->node_zones[z];
-			if (!populated_zone(zone))
+			if (!managed_zone(zone))
 				continue;
 
 			total_high_wmark += high_wmark_pages(zone);
@@ -2510,7 +2510,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 	/* If compaction would go ahead or the allocation would succeed, stop */
 	for (z = 0; z <= sc->reclaim_idx; z++) {
 		struct zone *zone = &pgdat->node_zones[z];
-		if (!populated_zone(zone))
+		if (!managed_zone(zone))
 			continue;
 
 		switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
@@ -2840,7 +2840,7 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
 	for (i = 0; i <= ZONE_NORMAL; i++) {
 		zone = &pgdat->node_zones[i];
-		if (!populated_zone(zone) ||
+		if (!managed_zone(zone) ||
 		    pgdat_reclaimable_pages(pgdat) == 0)
 			continue;
@@ -3141,7 +3141,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
 	for (i = 0; i <= classzone_idx; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
-		if (!populated_zone(zone))
+		if (!managed_zone(zone))
 			continue;
 
 		if (!zone_balanced(zone, order, classzone_idx))
@@ -3169,7 +3169,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
 	sc->nr_to_reclaim = 0;
 	for (z = 0; z <= sc->reclaim_idx; z++) {
 		zone = pgdat->node_zones + z;
-		if (!populated_zone(zone))
+		if (!managed_zone(zone))
 			continue;
 
 		sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
@@ -3242,7 +3242,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		if (buffer_heads_over_limit) {
 			for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
 				zone = pgdat->node_zones + i;
-				if (!populated_zone(zone))
+				if (!managed_zone(zone))
 					continue;
 
 				sc.reclaim_idx = i;
@@ -3262,7 +3262,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		 */
 		for (i = classzone_idx; i >= 0; i--) {
 			zone = pgdat->node_zones + i;
-			if (!populated_zone(zone))
+			if (!managed_zone(zone))
 				continue;
 
 			if (zone_balanced(zone, sc.order, classzone_idx))
@@ -3508,7 +3508,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	pg_data_t *pgdat;
 	int z;
 
-	if (!populated_zone(zone))
+	if (!managed_zone(zone))
 		return;
 
 	if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
@@ -3522,7 +3522,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	/* Only wake kswapd if all zones are unbalanced */
 	for (z = 0; z <= classzone_idx; z++) {
 		zone = pgdat->node_zones + z;
-		if (!populated_zone(zone))
+		if (!managed_zone(zone))
 			continue;
 
 		if (zone_balanced(zone, order, classzone_idx))

diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl

@@ -3570,15 +3570,6 @@ sub process {
 		}
 	}
 
-# check for uses of DEFINE_PCI_DEVICE_TABLE
-		if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
-			if (WARN("DEFINE_PCI_DEVICE_TABLE",
-				 "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" . $herecurr) &&
-			    $fix) {
-				$fixed[$fixlinenr] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /;
-			}
-		}
-
 # check for new typedefs, only function parameters and sparse annotations
 # make sense.
 		if ($line =~ /\btypedef\s/ &&

diff --git a/scripts/tags.sh b/scripts/tags.sh

@@ -206,7 +206,6 @@ regex_c=(
 	'/\<DEFINE_PER_CPU_SHARED_ALIGNED([^,]*, *\([[:alnum:]_]*\)/\1/v/'
 	'/\<DECLARE_WAIT_QUEUE_HEAD(\([[:alnum:]_]*\)/\1/v/'
 	'/\<DECLARE_\(TASKLET\|WORK\|DELAYED_WORK\)(\([[:alnum:]_]*\)/\2/v/'
-	'/\<DEFINE_PCI_DEVICE_TABLE(\([[:alnum:]_]*\)/\1/v/'
 	'/\(^\s\)OFFSET(\([[:alnum:]_]*\)/\2/v/'
 	'/\(^\s\)DEFINE(\([[:alnum:]_]*\)/\2/v/'
 	'/\<DEFINE_HASHTABLE(\([[:alnum:]_]*\)/\1/v/'