dax fixes for v6.1-rc8
- Fix duplicate overlapping device-dax instances for HMAT described "Soft Reserved" Memory - Fix missing node targets in the sysfs representation of memory tiers - Remove a confusing variable initialization -----BEGIN PGP SIGNATURE----- iHUEABYKAB0WIQSbo+XnGs+rwLz9XGXfioYZHlFsZwUCY4q2jAAKCRDfioYZHlFs Z1P/AQCbMguw+Nj0oTj64TxvrJ6JjFbmJXI8YTFuSt7yOK4XLgD+OlH4SmZyQ1rH HSY2kAl1mPKiqdoO0tKwcNtYYrOZtQQ= =4hxx -----END PGP SIGNATURE----- Merge tag 'dax-fixes-6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm Pull dax fixes from Dan Williams: "A few bug fixes around the handling of "Soft Reserved" memory and memory tiering information. Linux is starting to encounter more real world systems that deploy an ACPI HMAT to describe different performance classes of memory, as well as the "special purpose" (Linux "Soft Reserved") designation from EFI. These fixes result from that testing. It has all appeared in -next for a while with no known issues. - Fix duplicate overlapping device-dax instances for HMAT described "Soft Reserved" Memory - Fix missing node targets in the sysfs representation of memory tiers - Remove a confusing variable initialization" * tag 'dax-fixes-6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: device-dax: Fix duplicate 'hmem' device registration ACPI: HMAT: Fix initiator registration for single-initiator systems ACPI: HMAT: remove unnecessary variable initialization
This commit is contained in:
commit
6085bc9579
@ -562,17 +562,26 @@ static int initiator_cmp(void *priv, const struct list_head *a,
|
||||
{
|
||||
struct memory_initiator *ia;
|
||||
struct memory_initiator *ib;
|
||||
unsigned long *p_nodes = priv;
|
||||
|
||||
ia = list_entry(a, struct memory_initiator, node);
|
||||
ib = list_entry(b, struct memory_initiator, node);
|
||||
|
||||
set_bit(ia->processor_pxm, p_nodes);
|
||||
set_bit(ib->processor_pxm, p_nodes);
|
||||
|
||||
return ia->processor_pxm - ib->processor_pxm;
|
||||
}
|
||||
|
||||
static int initiators_to_nodemask(unsigned long *p_nodes)
|
||||
{
|
||||
struct memory_initiator *initiator;
|
||||
|
||||
if (list_empty(&initiators))
|
||||
return -ENXIO;
|
||||
|
||||
list_for_each_entry(initiator, &initiators, node)
|
||||
set_bit(initiator->processor_pxm, p_nodes);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hmat_register_target_initiators(struct memory_target *target)
|
||||
{
|
||||
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
|
||||
@ -609,7 +618,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
|
||||
* initiators.
|
||||
*/
|
||||
bitmap_zero(p_nodes, MAX_NUMNODES);
|
||||
list_sort(p_nodes, &initiators, initiator_cmp);
|
||||
list_sort(NULL, &initiators, initiator_cmp);
|
||||
if (initiators_to_nodemask(p_nodes) < 0)
|
||||
return;
|
||||
|
||||
if (!access0done) {
|
||||
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
|
||||
loc = localities_types[i];
|
||||
@ -643,8 +655,9 @@ static void hmat_register_target_initiators(struct memory_target *target)
|
||||
|
||||
/* Access 1 ignores Generic Initiators */
|
||||
bitmap_zero(p_nodes, MAX_NUMNODES);
|
||||
list_sort(p_nodes, &initiators, initiator_cmp);
|
||||
best = 0;
|
||||
if (initiators_to_nodemask(p_nodes) < 0)
|
||||
return;
|
||||
|
||||
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
|
||||
loc = localities_types[i];
|
||||
if (!loc)
|
||||
|
@ -8,6 +8,13 @@
|
||||
static bool nohmem;
|
||||
module_param_named(disable, nohmem, bool, 0444);
|
||||
|
||||
static struct resource hmem_active = {
|
||||
.name = "HMEM devices",
|
||||
.start = 0,
|
||||
.end = -1,
|
||||
.flags = IORESOURCE_MEM,
|
||||
};
|
||||
|
||||
void hmem_register_device(int target_nid, struct resource *r)
|
||||
{
|
||||
/* define a clean / non-busy resource for the platform device */
|
||||
@ -41,6 +48,12 @@ void hmem_register_device(int target_nid, struct resource *r)
|
||||
goto out_pdev;
|
||||
}
|
||||
|
||||
if (!__request_region(&hmem_active, res.start, resource_size(&res),
|
||||
dev_name(&pdev->dev), 0)) {
|
||||
dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
|
||||
goto out_active;
|
||||
}
|
||||
|
||||
pdev->dev.numa_node = numa_map_to_online_node(target_nid);
|
||||
info = (struct memregion_info) {
|
||||
.target_node = target_nid,
|
||||
@ -66,6 +79,8 @@ void hmem_register_device(int target_nid, struct resource *r)
|
||||
return;
|
||||
|
||||
out_resource:
|
||||
__release_region(&hmem_active, res.start, resource_size(&res));
|
||||
out_active:
|
||||
platform_device_put(pdev);
|
||||
out_pdev:
|
||||
memregion_free(id);
|
||||
@ -73,15 +88,6 @@ out_pdev:
|
||||
|
||||
static __init int hmem_register_one(struct resource *res, void *data)
|
||||
{
|
||||
/*
|
||||
* If the resource is not a top-level resource it was already
|
||||
* assigned to a device by the HMAT parsing.
|
||||
*/
|
||||
if (res->parent != &iomem_resource) {
|
||||
pr_info("HMEM: skip %pr, already claimed\n", res);
|
||||
return 0;
|
||||
}
|
||||
|
||||
hmem_register_device(phys_to_target_node(res->start), res);
|
||||
|
||||
return 0;
|
||||
|
Loading…
x
Reference in New Issue
Block a user