mm, hmm: use devm semantics for hmm_devmem_{add, remove}
devm semantics arrange for resources to be torn down when
device-driver-probe fails or when device-driver-release completes.
Similar to devm_memremap_pages(), there is no need to support an
explicit remove operation when the users properly adhere to devm
semantics.

Note that devm_kzalloc() automatically handles allocating node-local
memory.

Link: http://lkml.kernel.org/r/154275559545.76910.9186690723515469051.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 69324b8f48
commit 58ef15b765
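
The pattern the patch moves to is the standard devres idiom: allocate with devm_kzalloc() and register teardown with devm_add_action_or_reset(), so cleanup runs automatically on probe failure or device release. A minimal sketch of that idiom follows; foo_state, foo_teardown, and foo_probe are hypothetical names standing in for the driver side, not part of this patch:

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical per-device state, standing in for struct hmm_devmem. */
struct foo_state {
        struct device *dev;
};

static void foo_teardown(void *data)
{
        struct foo_state *state = data;

        /* Runs when probe fails or when the device is released. */
        dev_info(state->dev, "devres teardown\n");
}

static int foo_probe(struct device *dev)
{
        struct foo_state *state;
        int ret;

        /* devm_kzalloc() frees automatically and allocates node-local. */
        state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;
        state->dev = dev;

        /*
         * On registration failure this calls foo_teardown() immediately,
         * so no goto-based unwind and no explicit remove hook is needed.
         */
        ret = devm_add_action_or_reset(dev, foo_teardown, state);
        if (ret)
                return ret;

        return 0;
}

This is exactly the shape the patch gives hmm_devmem_add() below: every acquired resource is either devm-allocated or paired with a devres action, so the error paths collapse into plain returns.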
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -512,8 +512,7 @@ struct hmm_devmem {
  * enough and allocate struct page for it.
  *
  * The device driver can wrap the hmm_devmem struct inside a private device
- * driver struct. The device driver must call hmm_devmem_remove() before the
- * device goes away and before freeing the hmm_devmem struct memory.
+ * driver struct.
  */
 struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 				  struct device *device,
@@ -521,7 +520,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 					   struct device *device,
 					   struct resource *res);
-void hmm_devmem_remove(struct hmm_devmem *devmem);
 
 /*
  * hmm_devmem_page_set_drvdata - set per-page driver data field
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -987,7 +987,6 @@ static void hmm_devmem_ref_exit(void *data)
 
 	devmem = container_of(ref, struct hmm_devmem, ref);
 	percpu_ref_exit(ref);
-	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
 }
 
 static void hmm_devmem_ref_kill(void *data)
@@ -998,7 +997,6 @@ static void hmm_devmem_ref_kill(void *data)
 	devmem = container_of(ref, struct hmm_devmem, ref);
 	percpu_ref_kill(ref);
 	wait_for_completion(&devmem->completion);
-	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
 }
 
 static int hmm_devmem_fault(struct vm_area_struct *vma,
@@ -1036,7 +1034,7 @@ static void hmm_devmem_radix_release(struct resource *resource)
 	mutex_unlock(&hmm_devmem_lock);
 }
 
-static void hmm_devmem_release(struct device *dev, void *data)
+static void hmm_devmem_release(void *data)
 {
 	struct hmm_devmem *devmem = data;
 	struct resource *resource = devmem->resource;
@@ -1044,11 +1042,6 @@ static void hmm_devmem_release(struct device *dev, void *data)
 	struct zone *zone;
 	struct page *page;
 
-	if (percpu_ref_tryget_live(&devmem->ref)) {
-		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
-		percpu_ref_put(&devmem->ref);
-	}
-
 	/* pages are dead and unused, undo the arch mapping */
 	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
 	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
@@ -1174,19 +1167,6 @@ error:
 	return ret;
 }
 
-static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
-{
-	struct hmm_devmem *devmem = data;
-
-	return devmem->resource == match_data;
-}
-
-static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
-{
-	devres_release(devmem->device, &hmm_devmem_release,
-		       &hmm_devmem_match, devmem->resource);
-}
-
 /*
  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
  *
@@ -1214,8 +1194,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 
 	dev_pagemap_get_ops();
 
-	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
-				   GFP_KERNEL, dev_to_node(device));
+	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
 	if (!devmem)
 		return ERR_PTR(-ENOMEM);
 
@@ -1229,11 +1208,11 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
 			      0, GFP_KERNEL);
 	if (ret)
-		goto error_percpu_ref;
+		return ERR_PTR(ret);
 
-	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
+	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
 	if (ret)
-		goto error_devm_add_action;
+		return ERR_PTR(ret);
 
 	size = ALIGN(size, PA_SECTION_SIZE);
 	addr = min((unsigned long)iomem_resource.end,
@@ -1253,16 +1232,12 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 
 		devmem->resource = devm_request_mem_region(device, addr, size,
 							   dev_name(device));
-		if (!devmem->resource) {
-			ret = -ENOMEM;
-			goto error_no_resource;
-		}
+		if (!devmem->resource)
+			return ERR_PTR(-ENOMEM);
 		break;
 	}
-	if (!devmem->resource) {
-		ret = -ERANGE;
-		goto error_no_resource;
-	}
+	if (!devmem->resource)
+		return ERR_PTR(-ERANGE);
 
 	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
@@ -1271,28 +1246,13 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 
 	ret = hmm_devmem_pages_create(devmem);
 	if (ret)
-		goto error_pages;
-
-	devres_add(device, devmem);
-
-	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
-	if (ret) {
-		hmm_devmem_remove(devmem);
-		return ERR_PTR(ret);
-	}
+		return ERR_PTR(ret);
+
+	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
+	if (ret)
+		return ERR_PTR(ret);
 
 	return devmem;
-
-error_pages:
-	devm_release_mem_region(device, devmem->resource->start,
-				resource_size(devmem->resource));
-error_no_resource:
-error_devm_add_action:
-	hmm_devmem_ref_kill(&devmem->ref);
-	hmm_devmem_ref_exit(&devmem->ref);
-error_percpu_ref:
-	devres_free(devmem);
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(hmm_devmem_add);
 
@@ -1308,8 +1268,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 
 	dev_pagemap_get_ops();
 
-	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
-				   GFP_KERNEL, dev_to_node(device));
+	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
 	if (!devmem)
 		return ERR_PTR(-ENOMEM);
 
@@ -1323,12 +1282,12 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
 			      0, GFP_KERNEL);
 	if (ret)
-		goto error_percpu_ref;
+		return ERR_PTR(ret);
 
-	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
+	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
+				       &devmem->ref);
 	if (ret)
-		goto error_devm_add_action;
-
+		return ERR_PTR(ret);
 
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
 	devmem->pfn_last = devmem->pfn_first +
@@ -1336,59 +1295,21 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 
 	ret = hmm_devmem_pages_create(devmem);
 	if (ret)
-		goto error_devm_add_action;
-
-	devres_add(device, devmem);
-
-	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
-	if (ret) {
-		hmm_devmem_remove(devmem);
-		return ERR_PTR(ret);
-	}
+		return ERR_PTR(ret);
+
+	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = devm_add_action_or_reset(device, hmm_devmem_ref_kill,
+				       &devmem->ref);
+	if (ret)
+		return ERR_PTR(ret);
 
 	return devmem;
-
-error_devm_add_action:
-	hmm_devmem_ref_kill(&devmem->ref);
-	hmm_devmem_ref_exit(&devmem->ref);
-error_percpu_ref:
-	devres_free(devmem);
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(hmm_devmem_add_resource);
 
 /*
- * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
- *
- * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory
- *
- * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
- * of the device driver. It will free struct page and remove the resource that
- * reserved the physical address range for this device memory.
- */
-void hmm_devmem_remove(struct hmm_devmem *devmem)
-{
-	resource_size_t start, size;
-	struct device *device;
-	bool cdm = false;
-
-	if (!devmem)
-		return;
-
-	device = devmem->device;
-	start = devmem->resource->start;
-	size = resource_size(devmem->resource);
-
-	cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
-	hmm_devmem_ref_kill(&devmem->ref);
-	hmm_devmem_ref_exit(&devmem->ref);
-	hmm_devmem_pages_remove(devmem);
-
-	if (!cdm)
-		devm_release_mem_region(device, start, size);
-}
-EXPORT_SYMBOL(hmm_devmem_remove);
-
-/*
  * A device driver that wants to handle multiple devices memory through a
  * single fake device can use hmm_device to do so. This is purely a helper
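
With the remove hook gone, a consumer's probe path reduces to a single hmm_devmem_add() call with no unwind code. A hedged sketch of the resulting usage under this patch; foo_probe, foo_devmem_ops, and FOO_DEVMEM_SIZE are hypothetical names, and the ops callbacks are assumed to be defined elsewhere in the driver:

#include <linux/hmm.h>

/*
 * Hypothetical ops table; the .fault and .free callback bodies are
 * elided and assumed to exist elsewhere in the driver.
 */
static const struct hmm_devmem_ops foo_devmem_ops = {
        .fault = foo_devmem_fault,
        .free = foo_devmem_free,
};

static int foo_probe(struct device *dev)
{
        struct hmm_devmem *devmem;

        /* FOO_DEVMEM_SIZE is a made-up size constant for illustration. */
        devmem = hmm_devmem_add(&foo_devmem_ops, dev, FOO_DEVMEM_SIZE);
        if (IS_ERR(devmem))
                return PTR_ERR(devmem);

        /*
         * No hmm_devmem_remove() call and no error unwind here: the
         * devm_add_action_or_reset() registrations inside
         * hmm_devmem_add() tear down the pages, the resource, and the
         * devmem allocation if probe fails later or when the device
         * is released.
         */
        return 0;
}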