commit 67feaba413
upstream. The "hmem" platform-devices that are created to represent the platform-advertised "Soft Reserved" memory ranges end up inserting a resource that causes the iomem_resource tree to look like this: 340000000-43fffffff : hmem.0 340000000-43fffffff : Soft Reserved 340000000-43fffffff : dax0.0 This is because insert_resource() reparents ranges when they completely intersect an existing range. This matters because code that uses region_intersects() to scan for a given IORES_DESC will only check that top-level 'hmem.0' resource and not the 'Soft Reserved' descendant. So, to support EINJ (via einj_error_inject()) to inject errors into memory hosted by a dax-device, be sure to describe the memory as IORES_DESC_SOFT_RESERVED. This is a follow-on to: commit b13a3e5fd4
("ACPI: APEI: Fix _EINJ vs EFI_MEMORY_SP") ...that fixed EINJ support for "Soft Reserved" ranges in the first instance. Fixes: 262b45ae3a
("x86/efi: EFI soft reservation to E820 enumeration") Reported-by: Ricardo Sandoval Torres <ricardo.sandoval.torres@intel.com> Tested-by: Ricardo Sandoval Torres <ricardo.sandoval.torres@intel.com> Cc: <stable@vger.kernel.org> Cc: Tony Luck <tony.luck@intel.com> Cc: Omar Avelar <omar.avelar@intel.com> Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Cc: Mark Gross <markgross@kernel.org> Link: https://lore.kernel.org/r/166397075670.389916.7435722208896316387.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
102 lines
2.3 KiB
C
102 lines
2.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#include <linux/platform_device.h>
|
|
#include <linux/memregion.h>
|
|
#include <linux/module.h>
|
|
#include <linux/dax.h>
|
|
#include <linux/mm.h>
|
|
|
|
static bool nohmem;
|
|
module_param_named(disable, nohmem, bool, 0444);
|
|
|
|
void hmem_register_device(int target_nid, struct resource *r)
|
|
{
|
|
/* define a clean / non-busy resource for the platform device */
|
|
struct resource res = {
|
|
.start = r->start,
|
|
.end = r->end,
|
|
.flags = IORESOURCE_MEM,
|
|
.desc = IORES_DESC_SOFT_RESERVED,
|
|
};
|
|
struct platform_device *pdev;
|
|
struct memregion_info info;
|
|
int rc, id;
|
|
|
|
if (nohmem)
|
|
return;
|
|
|
|
rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
|
|
IORES_DESC_SOFT_RESERVED);
|
|
if (rc != REGION_INTERSECTS)
|
|
return;
|
|
|
|
id = memregion_alloc(GFP_KERNEL);
|
|
if (id < 0) {
|
|
pr_err("memregion allocation failure for %pr\n", &res);
|
|
return;
|
|
}
|
|
|
|
pdev = platform_device_alloc("hmem", id);
|
|
if (!pdev) {
|
|
pr_err("hmem device allocation failure for %pr\n", &res);
|
|
goto out_pdev;
|
|
}
|
|
|
|
pdev->dev.numa_node = numa_map_to_online_node(target_nid);
|
|
info = (struct memregion_info) {
|
|
.target_node = target_nid,
|
|
};
|
|
rc = platform_device_add_data(pdev, &info, sizeof(info));
|
|
if (rc < 0) {
|
|
pr_err("hmem memregion_info allocation failure for %pr\n", &res);
|
|
goto out_pdev;
|
|
}
|
|
|
|
rc = platform_device_add_resources(pdev, &res, 1);
|
|
if (rc < 0) {
|
|
pr_err("hmem resource allocation failure for %pr\n", &res);
|
|
goto out_resource;
|
|
}
|
|
|
|
rc = platform_device_add(pdev);
|
|
if (rc < 0) {
|
|
dev_err(&pdev->dev, "device add failed for %pr\n", &res);
|
|
goto out_resource;
|
|
}
|
|
|
|
return;
|
|
|
|
out_resource:
|
|
put_device(&pdev->dev);
|
|
out_pdev:
|
|
memregion_free(id);
|
|
}
|
|
|
|
static __init int hmem_register_one(struct resource *res, void *data)
|
|
{
|
|
/*
|
|
* If the resource is not a top-level resource it was already
|
|
* assigned to a device by the HMAT parsing.
|
|
*/
|
|
if (res->parent != &iomem_resource) {
|
|
pr_info("HMEM: skip %pr, already claimed\n", res);
|
|
return 0;
|
|
}
|
|
|
|
hmem_register_device(phys_to_target_node(res->start), res);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static __init int hmem_init(void)
|
|
{
|
|
walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
|
|
IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* As this is a fallback for address ranges unclaimed by the ACPI HMAT
|
|
* parsing it must be at an initcall level greater than hmat_init().
|
|
*/
|
|
late_initcall(hmem_init);
|