memremap: remove support for external pgmap refcounts
No driver is left using the external pgmap refcount, so remove the code
to support it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Link: https://lore.kernel.org/r/20211028151017.50234-1-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit b80892ca02
parent b842f1d14a
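For orientation (illustration, not part of the commit): after this change a
ZONE_DEVICE user simply embeds struct dev_pagemap and lets the core manage the
refcount; there is no pgmap->ref to supply and no ops->kill/ops->cleanup to
implement. A minimal sketch of a hypothetical caller, assuming a
MEMORY_DEVICE_GENERIC mapping over a driver-owned resource (all my_* names are
invented):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/memremap.h>

struct my_dev_data {
	struct dev_pagemap pgmap;	/* embedded; ref and done live inside */
};

static int my_map_pages(struct device *dev, struct my_dev_data *data,
		struct resource *res)
{
	void *addr;

	data->pgmap.type = MEMORY_DEVICE_GENERIC;
	data->pgmap.range.start = res->start;
	data->pgmap.range.end = res->end;
	data->pgmap.nr_range = 1;
	/* no data->pgmap.ref assignment, no ops->kill/->cleanup needed */
	addr = devm_memremap_pages(dev, &data->pgmap);
	return PTR_ERR_OR_ZERO(addr);
}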
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -219,7 +219,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 	error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
 			pci_bus_address(pdev, bar) + offset,
 			range_len(&pgmap->range), dev_to_node(&pdev->dev),
-			pgmap->ref);
+			&pgmap->ref);
 	if (error)
 		goto pages_free;
 
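Why this hunk compiles unchanged elsewhere: gen_pool_add_owner() stores an
opaque void *owner per chunk, so switching from a caller-provided
struct percpu_ref * to the address of the embedded ref is transparent to the
allocation path. A condensed sketch of the consuming side, loosely based on
pci_alloc_p2pmem() but simplified (pool lookup and most error handling
omitted; treat it as an assumption-laden illustration):

#include <linux/genalloc.h>
#include <linux/percpu-refcount.h>

static void *alloc_p2pmem_sketch(struct gen_pool *pool, size_t size)
{
	struct percpu_ref *ref;
	void *addr;

	/* gen_pool_alloc_owner() hands back the chunk's owner cookie,
	 * which after this commit is the address of the embedded
	 * pgmap->ref */
	addr = (void *)gen_pool_alloc_owner(pool, size, (void **)&ref);
	if (!addr)
		return NULL;

	/* pin the hosting pgmap for the lifetime of the allocation */
	if (unlikely(!percpu_ref_tryget_live(ref))) {
		gen_pool_free(pool, (unsigned long)addr, size);
		return NULL;
	}
	return addr;
}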
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -72,16 +72,6 @@ struct dev_pagemap_ops {
 	 */
 	void (*page_free)(struct page *page);
 
-	/*
-	 * Transition the refcount in struct dev_pagemap to the dead state.
-	 */
-	void (*kill)(struct dev_pagemap *pgmap);
-
-	/*
-	 * Wait for refcount in struct dev_pagemap to be idle and reap it.
-	 */
-	void (*cleanup)(struct dev_pagemap *pgmap);
-
 	/*
 	 * Used for private (un-addressable) device memory only. Must migrate
 	 * the page back to a CPU accessible page.
@@ -95,8 +85,7 @@ struct dev_pagemap_ops {
  * struct dev_pagemap - metadata for ZONE_DEVICE mappings
  * @altmap: pre-allocated/reserved memory for vmemmap allocations
  * @ref: reference count that pins the devm_memremap_pages() mapping
- * @internal_ref: internal reference if @ref is not provided by the caller
- * @done: completion for @internal_ref
+ * @done: completion for @ref
  * @type: memory type: see MEMORY_* in memory_hotplug.h
  * @flags: PGMAP_* flags to specify defailed behavior
  * @ops: method table
@@ -109,8 +98,7 @@ struct dev_pagemap_ops {
  */
 struct dev_pagemap {
 	struct vmem_altmap altmap;
-	struct percpu_ref *ref;
-	struct percpu_ref internal_ref;
+	struct percpu_ref ref;
 	struct completion done;
 	enum memory_type type;
 	unsigned int flags;
@@ -191,7 +179,7 @@ static inline unsigned long memremap_compat_align(void)
 static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
 {
 	if (pgmap)
-		percpu_ref_put(pgmap->ref);
+		percpu_ref_put(&pgmap->ref);
 }
 
 #endif /* _LINUX_MEMREMAP_H_ */
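To see the embedded refcount in use (illustration only, not part of the
commit; the helper name is invented): get_dev_pagemap() takes a reference via
percpu_ref_tryget_live(&pgmap->ref), and put_dev_pagemap() drops it:

#include <linux/memremap.h>

/* hypothetical helper: check whether a pfn belongs to a ZONE_DEVICE
 * mapping, taking and dropping the pgmap's embedded reference */
static bool pfn_is_zone_device(unsigned long pfn)
{
	struct dev_pagemap *pgmap = get_dev_pagemap(pfn, NULL);

	if (!pgmap)
		return false;
	put_dev_pagemap(pgmap);		/* percpu_ref_put(&pgmap->ref) */
	return true;
}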
diff --git a/mm/memremap.c b/mm/memremap.c
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -112,30 +112,6 @@ static unsigned long pfn_next(unsigned long pfn)
 #define for_each_device_pfn(pfn, map, i) \
 	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
 
-static void dev_pagemap_kill(struct dev_pagemap *pgmap)
-{
-	if (pgmap->ops && pgmap->ops->kill)
-		pgmap->ops->kill(pgmap);
-	else
-		percpu_ref_kill(pgmap->ref);
-}
-
-static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
-{
-	if (pgmap->ops && pgmap->ops->cleanup) {
-		pgmap->ops->cleanup(pgmap);
-	} else {
-		wait_for_completion(&pgmap->done);
-		percpu_ref_exit(pgmap->ref);
-	}
-	/*
-	 * Undo the pgmap ref assignment for the internal case as the
-	 * caller may re-enable the same pgmap.
-	 */
-	if (pgmap->ref == &pgmap->internal_ref)
-		pgmap->ref = NULL;
-}
-
 static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
 {
 	struct range *range = &pgmap->ranges[range_id];
@@ -167,11 +143,12 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 	unsigned long pfn;
 	int i;
 
-	dev_pagemap_kill(pgmap);
+	percpu_ref_kill(&pgmap->ref);
 	for (i = 0; i < pgmap->nr_range; i++)
 		for_each_device_pfn(pfn, pgmap, i)
 			put_page(pfn_to_page(pfn));
-	dev_pagemap_cleanup(pgmap);
+	wait_for_completion(&pgmap->done);
+	percpu_ref_exit(&pgmap->ref);
 
 	for (i = 0; i < pgmap->nr_range; i++)
 		pageunmap_range(pgmap, i);
@@ -188,8 +165,7 @@ static void devm_memremap_pages_release(void *data)
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 {
-	struct dev_pagemap *pgmap =
-		container_of(ref, struct dev_pagemap, internal_ref);
+	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
 
 	complete(&pgmap->done);
 }
@@ -295,8 +271,8 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
 				PHYS_PFN(range->start),
 				PHYS_PFN(range_len(range)), pgmap);
-	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
-			- pfn_first(pgmap, range_id));
+	percpu_ref_get_many(&pgmap->ref,
+		pfn_end(pgmap, range_id) - pfn_first(pgmap, range_id));
 	return 0;
 
 err_add_memory:
@@ -362,22 +338,11 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 		break;
 	}
 
-	if (!pgmap->ref) {
-		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
-			return ERR_PTR(-EINVAL);
-
-		init_completion(&pgmap->done);
-		error = percpu_ref_init(&pgmap->internal_ref,
-				dev_pagemap_percpu_release, 0, GFP_KERNEL);
-		if (error)
-			return ERR_PTR(error);
-		pgmap->ref = &pgmap->internal_ref;
-	} else {
-		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
-			WARN(1, "Missing reference count teardown definition\n");
-			return ERR_PTR(-EINVAL);
-		}
-	}
+	init_completion(&pgmap->done);
+	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
+			GFP_KERNEL);
+	if (error)
+		return ERR_PTR(error);
 
 	devmap_managed_enable_get(pgmap);
 
@@ -486,7 +451,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 	/* fall back to slow path lookup */
 	rcu_read_lock();
 	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
-	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
+	if (pgmap && !percpu_ref_tryget_live(&pgmap->ref))
 		pgmap = NULL;
 	rcu_read_unlock();
 
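The teardown that memunmap_pages() now open-codes is the standard percpu_ref
kill/wait/exit idiom. A self-contained sketch of that idiom outside this
commit (my_obj* names are invented for illustration):

#include <linux/completion.h>
#include <linux/gfp.h>
#include <linux/percpu-refcount.h>

struct my_obj {
	struct percpu_ref ref;
	struct completion done;
};

static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	complete(&obj->done);		/* last reference dropped */
}

static int my_obj_init(struct my_obj *obj)
{
	init_completion(&obj->done);
	return percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
}

static void my_obj_teardown(struct my_obj *obj)
{
	percpu_ref_kill(&obj->ref);	 /* no new tryget_live() succeeds */
	wait_for_completion(&obj->done); /* wait out in-flight references */
	percpu_ref_exit(&obj->ref);	 /* release the percpu counters */
}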
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -100,25 +100,17 @@ static void nfit_test_kill(void *_pgmap)
 {
 	struct dev_pagemap *pgmap = _pgmap;
 
-	WARN_ON(!pgmap || !pgmap->ref);
+	WARN_ON(!pgmap);
 
-	if (pgmap->ops && pgmap->ops->kill)
-		pgmap->ops->kill(pgmap);
-	else
-		percpu_ref_kill(pgmap->ref);
-
-	if (pgmap->ops && pgmap->ops->cleanup) {
-		pgmap->ops->cleanup(pgmap);
-	} else {
-		wait_for_completion(&pgmap->done);
-		percpu_ref_exit(pgmap->ref);
-	}
+	percpu_ref_kill(&pgmap->ref);
+	wait_for_completion(&pgmap->done);
+	percpu_ref_exit(&pgmap->ref);
 }
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 {
-	struct dev_pagemap *pgmap =
-		container_of(ref, struct dev_pagemap, internal_ref);
+	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
 
 	complete(&pgmap->done);
 }
@@ -132,22 +124,11 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	if (!nfit_res)
 		return devm_memremap_pages(dev, pgmap);
 
-	if (!pgmap->ref) {
-		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
-			return ERR_PTR(-EINVAL);
-
-		init_completion(&pgmap->done);
-		error = percpu_ref_init(&pgmap->internal_ref,
-				dev_pagemap_percpu_release, 0, GFP_KERNEL);
-		if (error)
-			return ERR_PTR(error);
-		pgmap->ref = &pgmap->internal_ref;
-	} else {
-		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
-			WARN(1, "Missing reference count teardown definition\n");
-			return ERR_PTR(-EINVAL);
-		}
-	}
+	init_completion(&pgmap->done);
+	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
+			GFP_KERNEL);
+	if (error)
+		return ERR_PTR(error);
 
 	error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
 	if (error)