2021-06-16 02:18:17 +03:00
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
# include <linux/libnvdimm.h>
2021-09-10 01:08:15 +03:00
# include <asm/unaligned.h>
2021-06-16 02:18:17 +03:00
# include <linux/device.h>
# include <linux/module.h>
2021-06-16 02:36:31 +03:00
# include <linux/ndctl.h>
# include <linux/async.h>
2021-06-16 02:18:17 +03:00
# include <linux/slab.h>
2022-01-11 19:06:40 +03:00
# include <linux/nd.h>
2021-08-02 20:29:38 +03:00
# include "cxlmem.h"
2021-06-16 02:18:17 +03:00
# include "cxl.h"
2021-09-14 22:03:04 +03:00
/* Mailbox commands claimed exclusively by LIBNVDIMM while a cxl_nvdimm is bound */
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
2023-06-15 04:30:02 +03:00
static void clear_exclusive ( void * mds )
2021-09-14 22:03:04 +03:00
{
2023-06-15 04:30:02 +03:00
clear_exclusive_cxl_commands ( mds , exclusive_cmds ) ;
2021-09-14 22:03:04 +03:00
}
2021-06-16 02:36:31 +03:00
/* devm release action: tear down the nvdimm created in cxl_nvdimm_probe() */
static void unregister_nvdimm(void *nvdimm)
{
	nvdimm_delete(nvdimm);
}
2022-11-30 22:23:01 +03:00
static ssize_t provider_show ( struct device * dev , struct device_attribute * attr , char * buf )
{
struct nvdimm * nvdimm = to_nvdimm ( dev ) ;
struct cxl_nvdimm * cxl_nvd = nvdimm_provider_data ( nvdimm ) ;
return sysfs_emit ( buf , " %s \n " , dev_name ( & cxl_nvd - > dev ) ) ;
}
static DEVICE_ATTR_RO ( provider ) ;
2022-11-30 22:22:50 +03:00
static ssize_t id_show ( struct device * dev , struct device_attribute * attr , char * buf )
{
struct nvdimm * nvdimm = to_nvdimm ( dev ) ;
struct cxl_nvdimm * cxl_nvd = nvdimm_provider_data ( nvdimm ) ;
struct cxl_dev_state * cxlds = cxl_nvd - > cxlmd - > cxlds ;
return sysfs_emit ( buf , " %lld \n " , cxlds - > serial ) ;
}
static DEVICE_ATTR_RO ( id ) ;
static struct attribute * cxl_dimm_attributes [ ] = {
& dev_attr_id . attr ,
2022-11-30 22:23:01 +03:00
& dev_attr_provider . attr ,
2022-11-30 22:22:50 +03:00
NULL
} ;
static const struct attribute_group cxl_dimm_attribute_group = {
. name = " cxl " ,
. attrs = cxl_dimm_attributes ,
} ;
static const struct attribute_group * cxl_dimm_attribute_groups [ ] = {
& cxl_dimm_attribute_group ,
NULL
} ;
2021-06-16 02:36:31 +03:00
static int cxl_nvdimm_probe ( struct device * dev )
{
struct cxl_nvdimm * cxl_nvd = to_cxl_nvdimm ( dev ) ;
2021-09-14 22:03:04 +03:00
struct cxl_memdev * cxlmd = cxl_nvd - > cxlmd ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
struct cxl_nvdimm_bridge * cxl_nvb = cxlmd - > cxl_nvb ;
2023-06-15 04:30:02 +03:00
struct cxl_memdev_state * mds = to_cxl_memdev_state ( cxlmd - > cxlds ) ;
2021-09-10 01:08:15 +03:00
unsigned long flags = 0 , cmd_mask = 0 ;
2021-06-16 02:36:31 +03:00
struct nvdimm * nvdimm ;
2021-09-14 22:03:04 +03:00
int rc ;
2021-06-16 02:36:31 +03:00
2023-06-15 04:30:02 +03:00
set_exclusive_cxl_commands ( mds , exclusive_cmds ) ;
rc = devm_add_action_or_reset ( dev , clear_exclusive , mds ) ;
2021-09-14 22:03:04 +03:00
if ( rc )
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
return rc ;
2021-06-16 02:36:31 +03:00
set_bit ( NDD_LABELING , & flags ) ;
2023-02-14 04:01:05 +03:00
set_bit ( NDD_REGISTER_SYNC , & flags ) ;
2021-09-10 01:08:15 +03:00
set_bit ( ND_CMD_GET_CONFIG_SIZE , & cmd_mask ) ;
set_bit ( ND_CMD_GET_CONFIG_DATA , & cmd_mask ) ;
set_bit ( ND_CMD_SET_CONFIG_DATA , & cmd_mask ) ;
2022-11-30 22:22:50 +03:00
nvdimm = __nvdimm_create ( cxl_nvb - > nvdimm_bus , cxl_nvd ,
cxl_dimm_attribute_groups , flags ,
2022-12-02 01:03:19 +03:00
cmd_mask , 0 , NULL , cxl_nvd - > dev_id ,
cxl_security_ops , NULL ) ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
if ( ! nvdimm )
return - ENOMEM ;
2021-06-16 02:36:31 +03:00
2021-09-14 22:03:04 +03:00
dev_set_drvdata ( dev , nvdimm ) ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
return devm_add_action_or_reset ( dev , unregister_nvdimm , nvdimm ) ;
2021-06-16 02:36:31 +03:00
}
static struct cxl_driver cxl_nvdimm_driver = {
. name = " cxl_nvdimm " ,
. probe = cxl_nvdimm_probe ,
. id = CXL_DEVICE_NVDIMM ,
2022-12-02 00:33:26 +03:00
. drv = {
. suppress_bind_attrs = true ,
} ,
2021-06-16 02:36:31 +03:00
} ;
2023-06-15 04:30:02 +03:00
static int cxl_pmem_get_config_size ( struct cxl_memdev_state * mds ,
2021-09-10 01:08:15 +03:00
struct nd_cmd_get_config_size * cmd ,
unsigned int buf_len )
{
if ( sizeof ( * cmd ) > buf_len )
return - EINVAL ;
2023-06-15 04:30:02 +03:00
* cmd = ( struct nd_cmd_get_config_size ) {
. config_size = mds - > lsa_size ,
. max_xfer =
mds - > payload_size - sizeof ( struct cxl_mbox_set_lsa ) ,
2021-09-10 01:08:15 +03:00
} ;
return 0 ;
}
2023-06-15 04:30:02 +03:00
static int cxl_pmem_get_config_data ( struct cxl_memdev_state * mds ,
2021-09-10 01:08:15 +03:00
struct nd_cmd_get_config_data_hdr * cmd ,
unsigned int buf_len )
{
2021-09-09 08:13:15 +03:00
struct cxl_mbox_get_lsa get_lsa ;
2022-12-06 07:22:33 +03:00
struct cxl_mbox_cmd mbox_cmd ;
2021-09-10 01:08:15 +03:00
int rc ;
if ( sizeof ( * cmd ) > buf_len )
return - EINVAL ;
if ( struct_size ( cmd , out_buf , cmd - > in_length ) > buf_len )
return - EINVAL ;
get_lsa = ( struct cxl_mbox_get_lsa ) {
2022-02-26 01:14:56 +03:00
. offset = cpu_to_le32 ( cmd - > in_offset ) ,
. length = cpu_to_le32 ( cmd - > in_length ) ,
2021-09-10 01:08:15 +03:00
} ;
2022-12-06 07:22:33 +03:00
mbox_cmd = ( struct cxl_mbox_cmd ) {
. opcode = CXL_MBOX_OP_GET_LSA ,
. payload_in = & get_lsa ,
. size_in = sizeof ( get_lsa ) ,
. size_out = cmd - > in_length ,
. payload_out = cmd - > out_buf ,
} ;
2021-09-10 01:08:15 +03:00
2023-06-15 04:30:02 +03:00
rc = cxl_internal_send_cmd ( mds , & mbox_cmd ) ;
2021-09-10 01:08:15 +03:00
cmd - > status = 0 ;
return rc ;
}
2023-06-15 04:30:02 +03:00
static int cxl_pmem_set_config_data ( struct cxl_memdev_state * mds ,
2021-09-10 01:08:15 +03:00
struct nd_cmd_set_config_hdr * cmd ,
unsigned int buf_len )
{
2021-09-09 08:13:15 +03:00
struct cxl_mbox_set_lsa * set_lsa ;
2022-12-06 07:22:33 +03:00
struct cxl_mbox_cmd mbox_cmd ;
2021-09-10 01:08:15 +03:00
int rc ;
if ( sizeof ( * cmd ) > buf_len )
return - EINVAL ;
/* 4-byte status follows the input data in the payload */
2022-09-27 10:02:47 +03:00
if ( size_add ( struct_size ( cmd , in_buf , cmd - > in_length ) , 4 ) > buf_len )
2021-09-10 01:08:15 +03:00
return - EINVAL ;
set_lsa =
kvzalloc ( struct_size ( set_lsa , data , cmd - > in_length ) , GFP_KERNEL ) ;
if ( ! set_lsa )
return - ENOMEM ;
* set_lsa = ( struct cxl_mbox_set_lsa ) {
2022-02-26 01:14:56 +03:00
. offset = cpu_to_le32 ( cmd - > in_offset ) ,
2021-09-10 01:08:15 +03:00
} ;
memcpy ( set_lsa - > data , cmd - > in_buf , cmd - > in_length ) ;
2022-12-06 07:22:33 +03:00
mbox_cmd = ( struct cxl_mbox_cmd ) {
. opcode = CXL_MBOX_OP_SET_LSA ,
. payload_in = set_lsa ,
. size_in = struct_size ( set_lsa , data , cmd - > in_length ) ,
} ;
2021-09-10 01:08:15 +03:00
2023-06-15 04:30:02 +03:00
rc = cxl_internal_send_cmd ( mds , & mbox_cmd ) ;
2021-09-10 01:08:15 +03:00
/*
* Set " firmware " status ( 4 - packed bytes at the end of the input
* payload .
*/
put_unaligned ( 0 , ( u32 * ) & cmd - > in_buf [ cmd - > in_length ] ) ;
kvfree ( set_lsa ) ;
return rc ;
}
static int cxl_pmem_nvdimm_ctl ( struct nvdimm * nvdimm , unsigned int cmd ,
void * buf , unsigned int buf_len )
{
struct cxl_nvdimm * cxl_nvd = nvdimm_provider_data ( nvdimm ) ;
unsigned long cmd_mask = nvdimm_cmd_mask ( nvdimm ) ;
struct cxl_memdev * cxlmd = cxl_nvd - > cxlmd ;
2023-06-15 04:30:02 +03:00
struct cxl_memdev_state * mds = to_cxl_memdev_state ( cxlmd - > cxlds ) ;
2021-09-10 01:08:15 +03:00
if ( ! test_bit ( cmd , & cmd_mask ) )
return - ENOTTY ;
switch ( cmd ) {
case ND_CMD_GET_CONFIG_SIZE :
2023-06-15 04:30:02 +03:00
return cxl_pmem_get_config_size ( mds , buf , buf_len ) ;
2021-09-10 01:08:15 +03:00
case ND_CMD_GET_CONFIG_DATA :
2023-06-15 04:30:02 +03:00
return cxl_pmem_get_config_data ( mds , buf , buf_len ) ;
2021-09-10 01:08:15 +03:00
case ND_CMD_SET_CONFIG_DATA :
2023-06-15 04:30:02 +03:00
return cxl_pmem_set_config_data ( mds , buf , buf_len ) ;
2021-09-10 01:08:15 +03:00
default :
return - ENOTTY ;
}
}
2021-06-16 02:18:17 +03:00
/* Bus-level ioctl entry point; only per-nvdimm commands are supported */
static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	/*
	 * No firmware response to translate, let the transport error
	 * code take precedence.
	 */
	*cmd_rc = 0;

	if (!nvdimm)
		return -ENOTTY;
	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}
2023-01-21 03:26:12 +03:00
static int detach_nvdimm ( struct device * dev , void * data )
{
struct cxl_nvdimm * cxl_nvd ;
bool release = false ;
if ( ! is_cxl_nvdimm ( dev ) )
return 0 ;
device_lock ( dev ) ;
if ( ! dev - > driver )
goto out ;
cxl_nvd = to_cxl_nvdimm ( dev ) ;
if ( cxl_nvd - > cxlmd & & cxl_nvd - > cxlmd - > cxl_nvb = = data )
release = true ;
out :
device_unlock ( dev ) ;
if ( release )
device_release_driver ( dev ) ;
return 0 ;
}
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
static void unregister_nvdimm_bus ( void * _cxl_nvb )
{
struct cxl_nvdimm_bridge * cxl_nvb = _cxl_nvb ;
struct nvdimm_bus * nvdimm_bus = cxl_nvb - > nvdimm_bus ;
2023-01-21 03:26:12 +03:00
bus_for_each_dev ( & cxl_bus_type , NULL , cxl_nvb , detach_nvdimm ) ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
cxl_nvb - > nvdimm_bus = NULL ;
nvdimm_bus_unregister ( nvdimm_bus ) ;
}
2021-06-16 02:18:17 +03:00
static int cxl_nvdimm_bridge_probe ( struct device * dev )
{
struct cxl_nvdimm_bridge * cxl_nvb = to_cxl_nvdimm_bridge ( dev ) ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
cxl_nvb - > nd_desc = ( struct nvdimm_bus_descriptor ) {
. provider_name = " CXL " ,
. module = THIS_MODULE ,
. ndctl = cxl_pmem_ctl ,
} ;
2021-06-16 02:18:17 +03:00
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
cxl_nvb - > nvdimm_bus =
nvdimm_bus_register ( & cxl_nvb - > dev , & cxl_nvb - > nd_desc ) ;
2021-06-16 02:18:17 +03:00
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
if ( ! cxl_nvb - > nvdimm_bus )
return - ENOMEM ;
2021-06-16 02:18:17 +03:00
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
return devm_add_action_or_reset ( dev , unregister_nvdimm_bus , cxl_nvb ) ;
2021-06-16 02:18:17 +03:00
}
static struct cxl_driver cxl_nvdimm_bridge_driver = {
. name = " cxl_nvdimm_bridge " ,
. probe = cxl_nvdimm_bridge_probe ,
. id = CXL_DEVICE_NVDIMM_BRIDGE ,
2022-12-02 00:33:26 +03:00
. drv = {
. suppress_bind_attrs = true ,
} ,
2021-06-16 02:18:17 +03:00
} ;
2022-01-11 19:06:40 +03:00
/* devm release action: delete the nd_region created for a pmem region */
static void unregister_nvdimm_region(void *nd_region)
{
	nvdimm_region_delete(nd_region);
}
2022-01-11 19:06:40 +03:00
/* devm release action: drop the iomem resource claimed for the region */
static void cxlr_pmem_remove_resource(void *res)
{
	remove_resource(res);
}
/* Per-mapping offset/serial pair used to build the interleave-set cookie */
struct cxl_pmem_region_info {
	u64 offset;
	u64 serial;
};
static int cxl_pmem_region_probe ( struct device * dev )
{
struct nd_mapping_desc mappings [ CXL_DECODER_MAX_INTERLEAVE ] ;
struct cxl_pmem_region * cxlr_pmem = to_cxl_pmem_region ( dev ) ;
struct cxl_region * cxlr = cxlr_pmem - > cxlr ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
struct cxl_nvdimm_bridge * cxl_nvb = cxlr - > cxl_nvb ;
2022-01-11 19:06:40 +03:00
struct cxl_pmem_region_info * info = NULL ;
struct nd_interleave_set * nd_set ;
struct nd_region_desc ndr_desc ;
struct cxl_nvdimm * cxl_nvd ;
struct nvdimm * nvdimm ;
struct resource * res ;
int rc , i = 0 ;
memset ( & mappings , 0 , sizeof ( mappings ) ) ;
memset ( & ndr_desc , 0 , sizeof ( ndr_desc ) ) ;
res = devm_kzalloc ( dev , sizeof ( * res ) , GFP_KERNEL ) ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging an endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
if ( ! res )
return - ENOMEM ;
2022-01-11 19:06:40 +03:00
res - > name = " Persistent Memory " ;
res - > start = cxlr_pmem - > hpa_range . start ;
res - > end = cxlr_pmem - > hpa_range . end ;
res - > flags = IORESOURCE_MEM ;
res - > desc = IORES_DESC_PERSISTENT_MEMORY ;
rc = insert_resource ( & iomem_resource , res ) ;
if ( rc )
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging and endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
return rc ;
2022-01-11 19:06:40 +03:00
rc = devm_add_action_or_reset ( dev , cxlr_pmem_remove_resource , res ) ;
if ( rc )
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging an endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
return rc ;
2022-01-11 19:06:40 +03:00
ndr_desc . res = res ;
ndr_desc . provider_data = cxlr_pmem ;
ndr_desc . numa_node = memory_add_physaddr_to_nid ( res - > start ) ;
ndr_desc . target_node = phys_to_target_node ( res - > start ) ;
if ( ndr_desc . target_node = = NUMA_NO_NODE ) {
ndr_desc . target_node = ndr_desc . numa_node ;
dev_dbg ( & cxlr - > dev , " changing target node from %d to %d " ,
NUMA_NO_NODE , ndr_desc . target_node ) ;
}
nd_set = devm_kzalloc ( dev , sizeof ( * nd_set ) , GFP_KERNEL ) ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging an endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
if ( ! nd_set )
return - ENOMEM ;
2022-01-11 19:06:40 +03:00
ndr_desc . memregion = cxlr - > id ;
set_bit ( ND_REGION_CXL , & ndr_desc . flags ) ;
set_bit ( ND_REGION_PERSIST_MEMCTRL , & ndr_desc . flags ) ;
info = kmalloc_array ( cxlr_pmem - > nr_mappings , sizeof ( * info ) , GFP_KERNEL ) ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging an endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
if ( ! info )
return - ENOMEM ;
2022-01-11 19:06:40 +03:00
for ( i = 0 ; i < cxlr_pmem - > nr_mappings ; i + + ) {
struct cxl_pmem_region_mapping * m = & cxlr_pmem - > mapping [ i ] ;
struct cxl_memdev * cxlmd = m - > cxlmd ;
struct cxl_dev_state * cxlds = cxlmd - > cxlds ;
cxl/pmem: Refactor nvdimm device registration, delete the workqueue
The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and
'struct cxl_pmem_region' manage CXL persistent memory resources. The
bridge represents base platform resources, the nvdimm represents one or
more endpoints, and the region is a collection of nvdimms that
contribute to an assembled address range.
Their relationship is such that a region is torn down if any component
endpoints are removed. All regions and endpoints are torn down if the
foundational bridge device goes down.
A workqueue was deployed to manage these interdependencies, but it is
difficult to reason about, and fragile. A recent attempt to take the CXL
root device lock in the cxl_mem driver was reported by lockdep as
colliding with the flush_work() in the cxl_pmem flows.
Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn
down immediately and hierarchically. A similar change is made to both
the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both
changes are made in the same patch which unfortunately makes the patch
bigger than desired.
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and
cxl_pmem_region as a devres release action of the bridge device.
Additionally, include a devres release action of the cxl_memdev or
cxl_region device that triggers the bridge's release action if an endpoint
exits before the bridge. I.e. this allows either unplugging the bridge,
or unplugging an endpoint to result in the same cleanup actions.
To keep the patch smaller the cleanup of the now defunct workqueue
infrastructure is saved for a follow-on patch.
Tested-by: Robert Richter <rrichter@amd.com>
Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2022-12-02 00:33:37 +03:00
cxl_nvd = cxlmd - > cxl_nvd ;
2022-01-11 19:06:40 +03:00
nvdimm = dev_get_drvdata ( & cxl_nvd - > dev ) ;
if ( ! nvdimm ) {
dev_dbg ( dev , " [%d]: %s: no nvdimm found \n " , i ,
dev_name ( & cxlmd - > dev ) ) ;
rc = - ENODEV ;
2022-11-04 03:30:36 +03:00
goto out_nvd ;
2022-01-11 19:06:40 +03:00
}
2022-11-04 03:30:36 +03:00
2022-01-11 19:06:40 +03:00
m - > cxl_nvd = cxl_nvd ;
mappings [ i ] = ( struct nd_mapping_desc ) {
. nvdimm = nvdimm ,
. start = m - > start ,
. size = m - > size ,
. position = i ,
} ;
info [ i ] . offset = m - > start ;
info [ i ] . serial = cxlds - > serial ;
}
ndr_desc . num_mappings = cxlr_pmem - > nr_mappings ;
ndr_desc . mapping = mappings ;
/*
* TODO enable CXL labels which skip the need for ' interleave - set cookie '
*/
nd_set - > cookie1 =
nd_fletcher64 ( info , sizeof ( * info ) * cxlr_pmem - > nr_mappings , 0 ) ;
nd_set - > cookie2 = nd_set - > cookie1 ;
ndr_desc . nd_set = nd_set ;
cxlr_pmem - > nd_region =
nvdimm_pmem_region_create ( cxl_nvb - > nvdimm_bus , & ndr_desc ) ;
2022-08-03 12:07:50 +03:00
if ( ! cxlr_pmem - > nd_region ) {
rc = - ENOMEM ;
2022-11-04 03:30:36 +03:00
goto out_nvd ;
2022-01-11 19:06:40 +03:00
}
rc = devm_add_action_or_reset ( dev , unregister_nvdimm_region ,
cxlr_pmem - > nd_region ) ;
2022-11-04 03:30:36 +03:00
out_nvd :
2022-01-11 19:06:40 +03:00
kfree ( info ) ;
return rc ;
}
static struct cxl_driver cxl_pmem_region_driver = {
. name = " cxl_pmem_region " ,
. probe = cxl_pmem_region_probe ,
. id = CXL_DEVICE_PMEM_REGION ,
2022-12-02 00:33:26 +03:00
. drv = {
. suppress_bind_attrs = true ,
} ,
2022-01-11 19:06:40 +03:00
} ;
2021-06-16 02:18:17 +03:00
static __init int cxl_pmem_init ( void )
{
int rc ;
2021-09-14 22:03:04 +03:00
set_bit ( CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE , exclusive_cmds ) ;
set_bit ( CXL_MEM_COMMAND_ID_SET_LSA , exclusive_cmds ) ;
2021-06-16 02:18:17 +03:00
rc = cxl_driver_register ( & cxl_nvdimm_bridge_driver ) ;
if ( rc )
2022-12-02 00:33:43 +03:00
return rc ;
2021-06-16 02:36:31 +03:00
rc = cxl_driver_register ( & cxl_nvdimm_driver ) ;
if ( rc )
goto err_nvdimm ;
2021-06-16 02:18:17 +03:00
2022-01-11 19:06:40 +03:00
rc = cxl_driver_register ( & cxl_pmem_region_driver ) ;
if ( rc )
goto err_region ;
2021-06-16 02:18:17 +03:00
return 0 ;
2022-01-11 19:06:40 +03:00
err_region :
cxl_driver_unregister ( & cxl_nvdimm_driver ) ;
2021-06-16 02:36:31 +03:00
err_nvdimm :
cxl_driver_unregister ( & cxl_nvdimm_bridge_driver ) ;
2021-06-16 02:18:17 +03:00
return rc ;
}
static __exit void cxl_pmem_exit ( void )
{
2022-01-11 19:06:40 +03:00
cxl_driver_unregister ( & cxl_pmem_region_driver ) ;
2021-06-16 02:36:31 +03:00
cxl_driver_unregister ( & cxl_nvdimm_driver ) ;
2021-06-16 02:18:17 +03:00
cxl_driver_unregister ( & cxl_nvdimm_bridge_driver ) ;
}
MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
/* Autoload on any of the three CXL pmem device classes this module drives. */
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);