mempolicy: rename mpol_free to mpol_put
This is a change that was requested some time ago by Mel Gorman. Makes sense to me, so here it is.

Note: I retain the name "mpol_free_shared_policy()" because it actually does free the shared_policy, which is NOT a reference counted object. However, ...

The mempolicy object[s] referenced by the shared_policy are reference counted, so mpol_put() is used to release the reference held by the shared_policy. The mempolicy might not be freed at this time, because some task attached to the shared object associated with the shared policy may be in the process of allocating a page based on the mempolicy. In that case, the task performing the allocation will hold a reference on the mempolicy, obtained via mpol_shared_policy_lookup(). The mempolicy will be freed when all tasks holding such a reference have called mpol_put() for the mempolicy.

Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit f0be3d32b0
parent 3b11630063
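For readers who want the refcounting convention spelled out before reading the diff, the sketch below is plain C11, not kernel code: mpol_lookup() stands in for mpol_shared_policy_lookup() and takes an extra reference, and mpol_put() drops one, freeing the policy only when the last holder lets go. The struct layout, the refcnt field, and the helper names mpol_alloc()/mpol_lookup() are simplified stand-ins invented for this example, not the kernel's.

/*
 * Minimal sketch of the get/put convention described above.
 * NOT the kernel implementation; fields and helpers are simplified.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mempolicy {
	atomic_int refcnt;	/* object stays alive while refcnt > 0 */
	int mode;
};

static struct mempolicy *mpol_alloc(int mode)
{
	struct mempolicy *pol = malloc(sizeof(*pol));

	if (!pol)
		exit(EXIT_FAILURE);
	atomic_init(&pol->refcnt, 1);	/* creator holds one reference */
	pol->mode = mode;
	return pol;
}

/* Analogous to mpol_shared_policy_lookup(): hand out a counted reference. */
static struct mempolicy *mpol_lookup(struct mempolicy *shared)
{
	atomic_fetch_add(&shared->refcnt, 1);
	return shared;
}

/* Analogous to mpol_put(): drop one reference, free on the last one. */
static void mpol_put(struct mempolicy *pol)
{
	if (!pol)
		return;
	if (atomic_fetch_sub(&pol->refcnt, 1) == 1) {
		printf("last reference dropped, freeing policy\n");
		free(pol);
	}
}

int main(void)
{
	struct mempolicy *shared = mpol_alloc(1);
	struct mempolicy *pol = mpol_lookup(shared);	/* allocating task's ref */

	mpol_put(shared);	/* shared_policy's reference: object survives */
	mpol_put(pol);		/* allocating task's reference: now freed */
	return 0;
}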
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -71,7 +71,7 @@ struct mm_struct;
  *
  * Freeing policy:
  * Mempolicy objects are reference counted.  A mempolicy will be freed when
- * mpol_free() decrements the reference count to zero.
+ * mpol_put() decrements the reference count to zero.
  *
  * Copying policy objects:
  * mpol_copy() allocates a new mempolicy and copies the specified mempolicy
@@ -98,11 +98,11 @@ struct mempolicy {
  * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
  */

-extern void __mpol_free(struct mempolicy *pol);
-static inline void mpol_free(struct mempolicy *pol)
+extern void __mpol_put(struct mempolicy *pol);
+static inline void mpol_put(struct mempolicy *pol)
 {
 	if (pol)
-		__mpol_free(pol);
+		__mpol_put(pol);
 }

 extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
@@ -190,7 +190,7 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 	return 1;
 }

-static inline void mpol_free(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *p)
 {
 }

--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -967,7 +967,7 @@ NORET_TYPE void do_exit(long code)
 	proc_exit_connector(tsk);
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
-	mpol_free(tsk->mempolicy);
+	mpol_put(tsk->mempolicy);
 	tsk->mempolicy = NULL;
 #endif
 #ifdef CONFIG_FUTEX
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1374,7 +1374,7 @@ bad_fork_cleanup_security:
 	security_task_free(p);
 bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
-	mpol_free(p->mempolicy);
+	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
 #endif
 	cgroup_exit(p, cgroup_callbacks_done);
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -116,7 +116,7 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
 			break;
 		}
 	}
-	mpol_free(mpol);	/* unref if mpol !NULL */
+	mpol_put(mpol);	/* unref if mpol !NULL */
 	return page;
 }

--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -529,7 +529,7 @@ static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
 	if (!err) {
 		mpol_get(new);
 		vma->vm_policy = new;
-		mpol_free(old);
+		mpol_put(old);
 	}
 	return err;
 }
@@ -595,7 +595,7 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 	new = mpol_new(mode, flags, nodes);
 	if (IS_ERR(new))
 		return PTR_ERR(new);
-	mpol_free(current->mempolicy);
+	mpol_put(current->mempolicy);
 	current->mempolicy = new;
 	mpol_set_task_struct_flag();
 	if (new && new->policy == MPOL_INTERLEAVE &&
@@ -948,7 +948,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	}

 	up_write(&mm->mmap_sem);
-	mpol_free(new);
+	mpol_put(new);
 	return err;
 }

@@ -1446,14 +1446,14 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
 		if (unlikely(pol != &default_policy &&
 				pol != current->mempolicy))
-			__mpol_free(pol);	/* finished with pol */
+			__mpol_put(pol);	/* finished with pol */
 		return node_zonelist(nid, gfp_flags);
 	}

 	zl = zonelist_policy(GFP_HIGHUSER, pol);
 	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
 		if (pol->policy != MPOL_BIND)
-			__mpol_free(pol);	/* finished with pol */
+			__mpol_put(pol);	/* finished with pol */
 		else
 			*mpol = pol;	/* unref needed after allocation */
 	}
@@ -1512,7 +1512,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
 		if (unlikely(pol != &default_policy &&
 				pol != current->mempolicy))
-			__mpol_free(pol);	/* finished with pol */
+			__mpol_put(pol);	/* finished with pol */
 		return alloc_page_interleave(gfp, 0, nid);
 	}
 	zl = zonelist_policy(gfp, pol);
@@ -1522,7 +1522,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 		 */
 		struct page *page = __alloc_pages_nodemask(gfp, 0,
 						zl, nodemask_policy(gfp, pol));
-		__mpol_free(pol);
+		__mpol_put(pol);
 		return page;
 	}
 	/*
@@ -1624,7 +1624,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 }

 /* Slow path of a mpol destructor. */
-void __mpol_free(struct mempolicy *p)
+void __mpol_put(struct mempolicy *p)
 {
 	if (!atomic_dec_and_test(&p->refcnt))
 		return;
@@ -1720,7 +1720,7 @@ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 {
 	pr_debug("deleting %lx-l%lx\n", n->start, n->end);
 	rb_erase(&n->nd, &sp->root);
-	mpol_free(n->policy);
+	mpol_put(n->policy);
 	kmem_cache_free(sn_cache, n);
 }

@@ -1780,7 +1780,7 @@ restart:
 	sp_insert(sp, new);
 	spin_unlock(&sp->lock);
 	if (new2) {
-		mpol_free(new2->policy);
+		mpol_put(new2->policy);
 		kmem_cache_free(sn_cache, new2);
 	}
 	return 0;
@@ -1805,7 +1805,7 @@ void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
 			/* Policy covers entire file */
 			pvma.vm_end = TASK_SIZE;
 			mpol_set_shared_policy(info, &pvma, newpol);
-			mpol_free(newpol);
+			mpol_put(newpol);
 		}
 	}
 }
@@ -1848,7 +1848,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 		n = rb_entry(next, struct sp_node, nd);
 		next = rb_next(&n->nd);
 		rb_erase(&n->nd, &p->root);
-		mpol_free(n->policy);
+		mpol_put(n->policy);
 		kmem_cache_free(sn_cache, n);
 	}
 	spin_unlock(&p->lock);
@@ -2068,7 +2068,7 @@ int show_numa_map(struct seq_file *m, void *v)
 	 * unref shared or other task's mempolicy
 	 */
 	if (pol != &default_policy && pol != current->mempolicy)
-		__mpol_free(pol);
+		__mpol_put(pol);

 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -232,7 +232,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
 		fput(vma->vm_file);
-	mpol_free(vma_policy(vma));
+	mpol_put(vma_policy(vma));
 	kmem_cache_free(vm_area_cachep, vma);
 	return next;
 }
@@ -626,7 +626,7 @@ again: remove_next = 1 + (end > next->vm_end);
 		if (file)
 			fput(file);
 		mm->map_count--;
-		mpol_free(vma_policy(next));
+		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
@@ -1182,7 +1182,7 @@ munmap_back:

 	if (file && vma_merge(mm, prev, addr, vma->vm_end,
 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-		mpol_free(vma_policy(vma));
+		mpol_put(vma_policy(vma));
 		kmem_cache_free(vm_area_cachep, vma);
 		fput(file);
 	} else {
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1196,7 +1196,7 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	page = swapin_readahead(entry, gfp, &pvma, 0);
-	mpol_free(pvma.vm_policy);
+	mpol_put(pvma.vm_policy);
 	return page;
 }

@@ -1212,7 +1212,7 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	page = alloc_page_vma(gfp, &pvma, 0);
-	mpol_free(pvma.vm_policy);
+	mpol_put(pvma.vm_policy);
 	return page;
 }
 #else /* !CONFIG_NUMA */