[PATCH] cpuset memory spread page cache implementation and hooks
Change the page cache allocation calls to support cpuset memory spreading.

See the previous patch, cpuset_mem_spread, for an explanation of cpuset memory spreading.

On systems without cpusets configured in the kernel, this is no change. On systems with cpusets configured in the kernel, but with the "memory_spread" cpuset option not enabled for the current task's cpuset, this adds a call to a cpuset routine and a failed bit test of the processor state flag PF_SPREAD_PAGE.

For tasks in cpusets with "memory_spread" enabled, this adds a call to a cpuset routine that computes which of the task's mems_allowed nodes should be preferred for this allocation. If memory spreading applies to a particular allocation, then any other NUMA mempolicy does not apply.

Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 44110fe385
parent 825a46af5a
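For context before the diff: the two cpuset helpers called by the new allocators come from the companion cpuset_mem_spread patch (the parent commit above). The sketch below only approximates their behaviour as described in the commit message; it is not the code of that patch, and in particular the name of the per-task rotor field is an assumption.

/*
 * Rough sketch of the helpers used by page_cache_alloc()/_cold() below.
 * Approximation of the companion cpuset_mem_spread patch, not its code;
 * the cpuset_mem_spread_rotor field name is assumed.
 */
static inline int cpuset_do_page_mem_spread(void)
{
	/* Fast path: a single bit test on the per-task flag. */
	return current->flags & PF_SPREAD_PAGE;
}

static int cpuset_mem_spread_node(void)
{
	int node;

	/* Round-robin a per-task cursor over the task's mems_allowed nodes. */
	node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
	if (node == MAX_NUMNODES)
		node = first_node(current->mems_allowed);
	current->cpuset_mem_spread_rotor = node;
	return node;
}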
@@ -51,6 +51,10 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 #define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, int cold);
 
+#ifdef CONFIG_NUMA
+extern struct page *page_cache_alloc(struct address_space *x);
+extern struct page *page_cache_alloc_cold(struct address_space *x);
+#else
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
 	return alloc_pages(mapping_gfp_mask(x), 0);
@@ -60,6 +64,7 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
 	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
 }
+#endif
 
 typedef int filler_t(void *, struct page *);
 
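One point worth noting about the pagemap.h change above: both the inline and the new out-of-line allocators derive their GFP flags from the mapping via mapping_gfp_mask(), so a per-mapping allocation policy still applies and cpuset spreading only chooses the node. A hypothetical filesystem snippet (the function name is illustrative, not from this patch) showing how that mask gets set:

/*
 * Illustrative only: a filesystem that must not recurse into the FS
 * while filling its page cache restricts the mapping's GFP mask once;
 * page_cache_alloc() then combines that mask with the spread node.
 */
static void example_setup_mapping(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
}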
 mm/filemap.c | 23 +++++++++++++++++++++++
@@ -29,6 +29,7 @@
 #include <linux/blkdev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/cpuset.h>
 #include "filemap.h"
 #include "internal.h"
 
@@ -427,6 +428,28 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	return ret;
 }
 
+#ifdef CONFIG_NUMA
+struct page *page_cache_alloc(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x), 0);
+}
+EXPORT_SYMBOL(page_cache_alloc);
+
+struct page *page_cache_alloc_cold(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+}
+EXPORT_SYMBOL(page_cache_alloc_cold);
+#endif
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages.  By using a hash table of
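The new allocators drop into existing page cache fill paths without any change to callers, which keep passing the mapping and get a node-spread page whenever the policy is enabled. The sketch below, loosely modeled on the readahead-style pattern of allocating a cache-cold page and inserting it into the page cache, shows how they are meant to be used; the function name and error handling are illustrative, not code from this patch.

/*
 * Illustrative call site: allocate a cache-cold page for @mapping at
 * @offset and insert it into the page cache and LRU.  Loosely modeled
 * on the readahead path; not part of this patch.
 */
static int example_populate_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;
	int err;

	page = page_cache_alloc_cold(mapping);	/* spread-aware under CONFIG_NUMA */
	if (!page)
		return -ENOMEM;

	err = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
	if (err)
		page_cache_release(page);
	return err;
}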