libnvdimm updates for v6.8
- updates to deprecated and changed interfaces
- use new cleanup.h features
- use new ida interface
- kdoc fixes
-----BEGIN PGP SIGNATURE-----

iIoEABYKADIWIQSgX9xt+GwmrJEQ+euebuN7TNx1MQUCZZ+J/RQcaXJhLndlaW55
QGludGVsLmNvbQAKCRCebuN7TNx1MZmlAQCsMW7RVGfdWw/xAPO+oBnK9k5w5YoY
1sU6p6KqZJMujAD9EQlCdzrEyuVci4rlX/Alvw0q6XWGHFF9XWl6IsYgJgM=
=YpQ9
-----END PGP SIGNATURE-----

Merge tag 'libnvdimm-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Ira Weiny:
 "A mix of bug fixes and updates to interfaces used by nvdimm:

  - Updates to interfaces include:
      Use the new scope based management
      Remove deprecated ida interfaces
      Update to sysfs_emit()

  - Fixup kdoc comments"

* tag 'libnvdimm-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  acpi/nfit: Use sysfs_emit() for all attributes
  nvdimm/namespace: fix kernel-doc for function params
  nvdimm/dimm_devs: fix kernel-doc for function params
  nvdimm/btt: fix btt_blk_cleanup() kernel-doc
  nvdimm-btt: simplify code with the scope based resource management
  nvdimm: Remove usage of the deprecated ida_simple_xx() API
  ACPI: NFIT: Use cleanup.h helpers instead of devm_*()
commit a3cc31e751
@@ -1186,7 +1186,7 @@ static ssize_t bus_dsm_mask_show(struct device *dev,
 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-	return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
+	return sysfs_emit(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
 }
 static struct device_attribute dev_attr_bus_dsm_mask =
 		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
@@ -1198,7 +1198,7 @@ static ssize_t revision_show(struct device *dev,
 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
+	return sysfs_emit(buf, "%d\n", acpi_desc->acpi_header.revision);
 }
 static DEVICE_ATTR_RO(revision);
 
@@ -1209,7 +1209,7 @@ static ssize_t hw_error_scrub_show(struct device *dev,
 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
+	return sysfs_emit(buf, "%d\n", acpi_desc->scrub_mode);
 }
 
 /*
@@ -1278,7 +1278,7 @@ static ssize_t scrub_show(struct device *dev,
 	mutex_lock(&acpi_desc->init_mutex);
 	busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
 		&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
-	rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
+	rc = sysfs_emit(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
 	/* Allow an admin to poll the busy state at a higher rate */
 	if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
 				&acpi_desc->scrub_flags)) {
@@ -1382,7 +1382,7 @@ static ssize_t handle_show(struct device *dev,
 {
 	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
 
-	return sprintf(buf, "%#x\n", memdev->device_handle);
+	return sysfs_emit(buf, "%#x\n", memdev->device_handle);
 }
 static DEVICE_ATTR_RO(handle);
 
@@ -1391,7 +1391,7 @@ static ssize_t phys_id_show(struct device *dev,
 {
 	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
 
-	return sprintf(buf, "%#x\n", memdev->physical_id);
+	return sysfs_emit(buf, "%#x\n", memdev->physical_id);
 }
 static DEVICE_ATTR_RO(phys_id);
 
@@ -1400,7 +1400,7 @@ static ssize_t vendor_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
+	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
 }
 static DEVICE_ATTR_RO(vendor);
 
@@ -1409,7 +1409,7 @@ static ssize_t rev_id_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
+	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
 }
 static DEVICE_ATTR_RO(rev_id);
 
@@ -1418,7 +1418,7 @@ static ssize_t device_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
+	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
 }
 static DEVICE_ATTR_RO(device);
 
@@ -1427,7 +1427,7 @@ static ssize_t subsystem_vendor_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
+	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
 }
 static DEVICE_ATTR_RO(subsystem_vendor);
 
@@ -1436,7 +1436,7 @@ static ssize_t subsystem_rev_id_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%04x\n",
+	return sysfs_emit(buf, "0x%04x\n",
 			be16_to_cpu(dcr->subsystem_revision_id));
 }
 static DEVICE_ATTR_RO(subsystem_rev_id);
@@ -1446,7 +1446,7 @@ static ssize_t subsystem_device_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
+	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
 }
 static DEVICE_ATTR_RO(subsystem_device);
 
@@ -1465,7 +1465,7 @@ static ssize_t format_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
+	return sysfs_emit(buf, "0x%04x\n", le16_to_cpu(dcr->code));
 }
 static DEVICE_ATTR_RO(format);
 
@@ -1498,7 +1498,7 @@ static ssize_t format1_show(struct device *dev,
 			continue;
 		if (nfit_dcr->dcr->code == dcr->code)
 			continue;
-		rc = sprintf(buf, "0x%04x\n",
+		rc = sysfs_emit(buf, "0x%04x\n",
 				le16_to_cpu(nfit_dcr->dcr->code));
 		break;
 	}
@@ -1515,7 +1515,7 @@ static ssize_t formats_show(struct device *dev,
 {
 	struct nvdimm *nvdimm = to_nvdimm(dev);
 
-	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
+	return sysfs_emit(buf, "%d\n", num_nvdimm_formats(nvdimm));
 }
 static DEVICE_ATTR_RO(formats);
 
@@ -1524,7 +1524,7 @@ static ssize_t serial_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
+	return sysfs_emit(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
 }
 static DEVICE_ATTR_RO(serial);
 
@@ -1536,7 +1536,7 @@ static ssize_t family_show(struct device *dev,
 
 	if (nfit_mem->family < 0)
 		return -ENXIO;
-	return sprintf(buf, "%d\n", nfit_mem->family);
+	return sysfs_emit(buf, "%d\n", nfit_mem->family);
 }
 static DEVICE_ATTR_RO(family);
 
@@ -1548,7 +1548,7 @@ static ssize_t dsm_mask_show(struct device *dev,
 
 	if (nfit_mem->family < 0)
 		return -ENXIO;
-	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
+	return sysfs_emit(buf, "%#lx\n", nfit_mem->dsm_mask);
 }
 static DEVICE_ATTR_RO(dsm_mask);
 
@@ -1562,7 +1562,7 @@ static ssize_t flags_show(struct device *dev,
 	if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
 		flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
 
-	return sprintf(buf, "%s%s%s%s%s%s%s\n",
+	return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
 		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
 		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
 		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
@@ -1579,7 +1579,7 @@ static ssize_t id_show(struct device *dev,
 	struct nvdimm *nvdimm = to_nvdimm(dev);
 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 
-	return sprintf(buf, "%s\n", nfit_mem->id);
+	return sysfs_emit(buf, "%s\n", nfit_mem->id);
 }
 static DEVICE_ATTR_RO(id);
 
@@ -1589,7 +1589,7 @@ static ssize_t dirty_shutdown_show(struct device *dev,
 	struct nvdimm *nvdimm = to_nvdimm(dev);
 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 
-	return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
+	return sysfs_emit(buf, "%d\n", nfit_mem->dirty_shutdown);
 }
 static DEVICE_ATTR_RO(dirty_shutdown);
 
@@ -2172,7 +2172,7 @@ static ssize_t range_index_show(struct device *dev,
 	struct nd_region *nd_region = to_nd_region(dev);
 	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
 
-	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
+	return sysfs_emit(buf, "%d\n", nfit_spa->spa->range_index);
 }
 static DEVICE_ATTR_RO(range_index);
 
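The hunks above all apply the same pattern: sysfs show() callbacks switch from sprintf() to sysfs_emit(), which bounds the output to PAGE_SIZE and warns if it is handed anything other than a page-aligned sysfs buffer. A minimal sketch of that pattern, with hypothetical names (struct foo_dev and example_value are illustrations, not part of this series):

#include <linux/device.h>
#include <linux/sysfs.h>

struct foo_dev {
	int value;
};

static ssize_t example_value_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	/* Before: return sprintf(buf, "%d\n", foo->value); */
	return sysfs_emit(buf, "%d\n", foo->value);
}
static DEVICE_ATTR_RO(example_value);

Like sprintf(), sysfs_emit() returns the number of characters written, so the ssize_t return of the show() callback is unchanged and the conversion is a one-line substitution.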
@@ -2257,26 +2257,23 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
 		struct nd_region_desc *ndr_desc,
 		struct acpi_nfit_system_address *spa)
 {
+	u16 nr = ndr_desc->num_mappings;
+	struct nfit_set_info2 *info2 __free(kfree) =
+		kcalloc(nr, sizeof(*info2), GFP_KERNEL);
+	struct nfit_set_info *info __free(kfree) =
+		kcalloc(nr, sizeof(*info), GFP_KERNEL);
 	struct device *dev = acpi_desc->dev;
 	struct nd_interleave_set *nd_set;
-	u16 nr = ndr_desc->num_mappings;
-	struct nfit_set_info2 *info2;
-	struct nfit_set_info *info;
 	int i;
 
+	if (!info || !info2)
+		return -ENOMEM;
+
 	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
 	if (!nd_set)
 		return -ENOMEM;
 	import_guid(&nd_set->type_guid, spa->range_guid);
 
-	info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL);
-	if (!info2)
-		return -ENOMEM;
-
 	for (i = 0; i < nr; i++) {
 		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
 		struct nvdimm *nvdimm = mapping->nvdimm;
@@ -2337,8 +2334,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
 	}
 
 	ndr_desc->nd_set = nd_set;
-	devm_kfree(dev, info);
-	devm_kfree(dev, info2);
 
 	return 0;
 }
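The two hunks above drop the devm_*() bookkeeping for the temporary interleave-set buffers in favor of cleanup.h scope-based management: a pointer declared with __free(kfree) has kfree() run on it automatically when it goes out of scope, so every early return path is covered without explicit frees or unwind labels. A minimal sketch of the idiom under hypothetical names (struct frob and frob_check are illustrations, not part of this series):

#include <linux/cleanup.h>
#include <linux/slab.h>

struct frob {
	int nr;
};

static int frob_check(int nr)
{
	/* Hypothetical temporary buffer, freed automatically on every return. */
	struct frob *tmp __free(kfree) = kzalloc(sizeof(*tmp), GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;

	tmp->nr = nr;
	if (nr < 0)
		return -EINVAL;	/* tmp is freed here... */

	return 0;		/* ...and here */
}

If an allocation needs to outlive the function, ownership has to be handed out with no_free_ptr()/return_ptr(); that is not needed in the hunks above because the info buffers are only used during initialization.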
@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/nd.h>
 #include <linux/backing-dev.h>
+#include <linux/cleanup.h>
 #include "btt.h"
 #include "nd.h"
 
@@ -847,23 +848,20 @@ static int discover_arenas(struct btt *btt)
 {
 	int ret = 0;
 	struct arena_info *arena;
-	struct btt_sb *super;
 	size_t remaining = btt->rawsize;
 	u64 cur_nlba = 0;
 	size_t cur_off = 0;
 	int num_arenas = 0;
 
-	super = kzalloc(sizeof(*super), GFP_KERNEL);
+	struct btt_sb *super __free(kfree) = kzalloc(sizeof(*super), GFP_KERNEL);
 	if (!super)
 		return -ENOMEM;
 
 	while (remaining) {
 		/* Alloc memory for arena */
 		arena = alloc_arena(btt, 0, 0, 0);
-		if (!arena) {
-			ret = -ENOMEM;
-			goto out_super;
-		}
+		if (!arena)
+			return -ENOMEM;
 
 		arena->infooff = cur_off;
 		ret = btt_info_read(arena, super);
@@ -919,14 +917,11 @@ static int discover_arenas(struct btt *btt)
 	btt->nlba = cur_nlba;
 	btt->init_state = INIT_READY;
 
-	kfree(super);
 	return ret;
 
 out:
 	kfree(arena);
 	free_arenas(btt);
-out_super:
-	kfree(super);
 	return ret;
 }
 
@@ -1550,7 +1545,7 @@ static void btt_blk_cleanup(struct btt *btt)
  * @rawsize: raw size in bytes of the backing device
  * @lbasize: lba size of the backing device
  * @uuid: A uuid for the backing device - this is stored on media
- * @maxlane: maximum number of parallel requests the device can handle
+ * @nd_region: &struct nd_region for the REGION device
  *
  * Initialize a Block Translation Table on a backing device to provide
  * single sector power fail atomicity.
@@ -19,7 +19,7 @@ static void nd_btt_release(struct device *dev)
 
 	dev_dbg(dev, "trace\n");
 	nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
-	ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+	ida_free(&nd_region->btt_ida, nd_btt->id);
 	kfree(nd_btt->uuid);
 	kfree(nd_btt);
 }
@@ -191,7 +191,7 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
 	if (!nd_btt)
 		return NULL;
 
-	nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
+	nd_btt->id = ida_alloc(&nd_region->btt_ida, GFP_KERNEL);
 	if (nd_btt->id < 0)
 		goto out_nd_btt;
 
@@ -217,7 +217,7 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
 	return dev;
 
 out_put_id:
-	ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+	ida_free(&nd_region->btt_ida, nd_btt->id);
 
 out_nd_btt:
 	kfree(nd_btt);
@@ -285,7 +285,7 @@ static void nvdimm_bus_release(struct device *dev)
 	struct nvdimm_bus *nvdimm_bus;
 
 	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
-	ida_simple_remove(&nd_ida, nvdimm_bus->id);
+	ida_free(&nd_ida, nvdimm_bus->id);
 	kfree(nvdimm_bus);
 }
 
@@ -342,7 +342,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
 	INIT_LIST_HEAD(&nvdimm_bus->list);
 	INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
 	init_waitqueue_head(&nvdimm_bus->wait);
-	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
+	nvdimm_bus->id = ida_alloc(&nd_ida, GFP_KERNEL);
 	if (nvdimm_bus->id < 0) {
 		kfree(nvdimm_bus);
 		return NULL;
@@ -18,7 +18,7 @@ static void nd_dax_release(struct device *dev)
 
 	dev_dbg(dev, "trace\n");
 	nd_detach_ndns(dev, &nd_pfn->ndns);
-	ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
+	ida_free(&nd_region->dax_ida, nd_pfn->id);
 	kfree(nd_pfn->uuid);
 	kfree(nd_dax);
 }
@@ -55,7 +55,7 @@ static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
 		return NULL;
 
 	nd_pfn = &nd_dax->nd_pfn;
-	nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
+	nd_pfn->id = ida_alloc(&nd_region->dax_ida, GFP_KERNEL);
 	if (nd_pfn->id < 0) {
 		kfree(nd_dax);
 		return NULL;
@@ -53,7 +53,9 @@ static int validate_dimm(struct nvdimm_drvdata *ndd)
 
 /**
  * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
- * @nvdimm: dimm to initialize
+ * @ndd: dimm to initialize
+ *
+ * Returns: %0 if the area is already valid, -errno on error
  */
 int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
 {
@@ -194,7 +196,7 @@ static void nvdimm_release(struct device *dev)
 {
 	struct nvdimm *nvdimm = to_nvdimm(dev);
 
-	ida_simple_remove(&dimm_ida, nvdimm->id);
+	ida_free(&dimm_ida, nvdimm->id);
 	kfree(nvdimm);
 }
 
@@ -592,7 +594,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
 	if (!nvdimm)
 		return NULL;
 
-	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
+	nvdimm->id = ida_alloc(&dimm_ida, GFP_KERNEL);
 	if (nvdimm->id < 0) {
 		kfree(nvdimm);
 		return NULL;
@@ -722,6 +724,9 @@ static unsigned long dpa_align(struct nd_region *nd_region)
  * contiguous unallocated dpa range.
  * @nd_region: constrain available space check to this reference region
  * @nd_mapping: container of dpa-resource-root + labels
+ *
+ * Returns: %0 if there is an alignment error, otherwise the max
+ * unallocated dpa range
  */
 resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
 					   struct nd_mapping *nd_mapping)
@@ -767,6 +772,8 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
  *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set.
+ *
+ * Returns: %0 if there is an alignment error, otherwise the unallocated dpa
 */
 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
 				      struct nd_mapping *nd_mapping)
@@ -836,8 +843,10 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
 
 /**
  * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
- * @nvdimm: container of dpa-resource-root + labels
+ * @ndd: container of dpa-resource-root + labels
  * @label_id: dpa resource name of the form pmem-<human readable uuid>
+ *
+ * Returns: sum of the dpa allocated to the label_id
 */
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
 		struct nd_label_id *label_id)
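Mixed in with the ida conversions, the hunks above (and several in the next file) are kernel-doc repairs: @parameter names are made to match the C prototypes (for example @nvdimm becomes @ndd) and non-void functions gain a Returns: section, which addresses the corresponding scripts/kernel-doc warnings. The shape of a conforming comment, with hypothetical names (example_count_widgets and struct example_bus are illustrations, not part of this series):

/**
 * example_count_widgets() - count the widgets attached to a bus
 * @bus: bus to scan; the name must match the parameter in the prototype
 *
 * Returns: the number of widgets found, or -errno on failure
 */
static int example_count_widgets(struct example_bus *bus);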
@@ -27,7 +27,7 @@ static void namespace_pmem_release(struct device *dev)
 	struct nd_region *nd_region = to_nd_region(dev->parent);
 
 	if (nspm->id >= 0)
-		ida_simple_remove(&nd_region->ns_ida, nspm->id);
+		ida_free(&nd_region->ns_ida, nspm->id);
 	kfree(nspm->alt_name);
 	kfree(nspm->uuid);
 	kfree(nspm);
@@ -71,6 +71,8 @@ static int is_namespace_uuid_busy(struct device *dev, void *data)
  * nd_is_uuid_unique - verify that no other namespace has @uuid
  * @dev: any device on a nvdimm_bus
  * @uuid: uuid to check
+ *
+ * Returns: %true if the uuid is unique, %false if not
  */
 bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
 {
@@ -337,6 +339,8 @@ static int scan_free(struct nd_region *nd_region,
  * adjust_resource() the allocation to @n, but if @n is larger than the
  * allocation delete it and find the 'new' last allocation in the label
  * set.
+ *
+ * Returns: %0 on success on -errno on error
  */
 static int shrink_dpa_allocation(struct nd_region *nd_region,
 		struct nd_label_id *label_id, resource_size_t n)
@@ -662,6 +666,8 @@ void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
  * allocations from the start of an interleave set and end at the first
  * BLK allocation or the end of the interleave set, whichever comes
  * first.
+ *
+ * Returns: %0 on success on -errno on error
  */
 static int grow_dpa_allocation(struct nd_region *nd_region,
 		struct nd_label_id *label_id, resource_size_t n)
@@ -951,6 +957,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
  * @dev: namespace type for generating label_id
  * @new_uuid: incoming uuid
  * @old_uuid: reference to the uuid storage location in the namespace object
+ *
+ * Returns: %0 on success on -errno on error
  */
 static int namespace_update_uuid(struct nd_region *nd_region,
 		struct device *dev, uuid_t *new_uuid,
@@ -1656,8 +1664,10 @@ static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
 /**
  * create_namespace_pmem - validate interleave set labelling, retrieve label0
  * @nd_region: region with mappings to validate
- * @nspm: target namespace to create
+ * @nd_mapping: container of dpa-resource-root + labels
  * @nd_label: target pmem namespace label to evaluate
+ *
+ * Returns: the created &struct device on success or ERR_PTR(-errno) on error
  */
 static struct device *create_namespace_pmem(struct nd_region *nd_region,
 		struct nd_mapping *nd_mapping,
@@ -1810,7 +1820,7 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
 	res->name = dev_name(&nd_region->dev);
 	res->flags = IORESOURCE_MEM;
 
-	nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
+	nspm->id = ida_alloc(&nd_region->ns_ida, GFP_KERNEL);
 	if (nspm->id < 0) {
 		kfree(nspm);
 		return NULL;
@@ -2188,8 +2198,7 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
 			struct nd_namespace_pmem *nspm;
 
 			nspm = to_nd_namespace_pmem(dev);
-			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
-					GFP_KERNEL);
+			id = ida_alloc(&nd_region->ns_ida, GFP_KERNEL);
 			nspm->id = id;
 		} else
 			id = i;
@@ -22,7 +22,7 @@ static void nd_pfn_release(struct device *dev)
 
 	dev_dbg(dev, "trace\n");
 	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
-	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
+	ida_free(&nd_region->pfn_ida, nd_pfn->id);
 	kfree(nd_pfn->uuid);
 	kfree(nd_pfn);
 }
@@ -326,7 +326,7 @@ static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
 	if (!nd_pfn)
 		return NULL;
 
-	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
+	nd_pfn->id = ida_alloc(&nd_region->pfn_ida, GFP_KERNEL);
 	if (nd_pfn->id < 0) {
 		kfree(nd_pfn);
 		return NULL;
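The ida hunks across these files all replace the deprecated ida_simple_get()/ida_simple_remove() pair with ida_alloc()/ida_free(). With a start of 0 and an end of 0 (which ida_simple_get() treats as "no upper bound"), the two allocators behave identically, so the calls map one-to-one. A minimal sketch with a hypothetical ID space (widget_ida is an illustration, not part of this series):

#include <linux/idr.h>

static DEFINE_IDA(widget_ida);

static int widget_get_id(void)
{
	/* Before: return ida_simple_get(&widget_ida, 0, 0, GFP_KERNEL); */
	return ida_alloc(&widget_ida, GFP_KERNEL);	/* smallest free ID >= 0, or -errno */
}

static void widget_put_id(int id)
{
	/* Before: ida_simple_remove(&widget_ida, id); */
	ida_free(&widget_ida, id);
}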