// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"

/*
 * Ordered workqueue for cxl nvdimm device arrival and departure
 * to coordinate bus rescans when a bridge arrives and trigger remove
 * operations when the bridge is removed.
 */
static struct workqueue_struct *cxl_pmem_wq;

static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

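/*
 * While a cxl_nvdimm is bound, the commands in this bitmap are claimed for
 * exclusive kernel use so that userspace raw commands cannot race kernel
 * label updates; clear_exclusive() is the devm undo action that releases
 * them when the device goes away.
 */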
static void clear_exclusive(void *cxlds)
{
	clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}

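/*
 * devm release action for the nvdimm created in cxl_nvdimm_probe(): walk the
 * pmem_regions xarray and unbind any cxl_pmem_region devices still
 * referencing this dimm (dropping the bridge lock around each release), then
 * delete the nvdimm and sever the bridge link.
 */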
static void unregister_nvdimm(void *nvdimm)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
	struct cxl_pmem_region *cxlr_pmem;
	unsigned long index;

	device_lock(&cxl_nvb->dev);
	dev_set_drvdata(&cxl_nvd->dev, NULL);
	xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) {
		get_device(&cxlr_pmem->dev);
		device_unlock(&cxl_nvb->dev);

		device_release_driver(&cxlr_pmem->dev);
		put_device(&cxlr_pmem->dev);

		device_lock(&cxl_nvb->dev);
	}
	device_unlock(&cxl_nvb->dev);

	nvdimm_delete(nvdimm);
	cxl_nvd->bridge = NULL;
}

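/*
 * Bind a cxl_nvdimm to an nvdimm on the bridge's nvdimm_bus: mark the LSA
 * commands kernel-exclusive and advertise the label-space commands
 * (GET_CONFIG_SIZE, GET_CONFIG_DATA, SET_CONFIG_DATA) to the nvdimm core.
 */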
static int cxl_nvdimm_probe(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	unsigned long flags = 0, cmd_mask = 0;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nvdimm *nvdimm;
	int rc;

	cxl_nvb = cxl_find_nvdimm_bridge(dev);
	if (!cxl_nvb)
		return -ENXIO;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		rc = -ENXIO;
		goto out;
	}

	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
	if (rc)
		goto out;

	set_bit(NDD_LABELING, &flags);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
			       cmd_mask, 0, NULL);
	if (!nvdimm) {
		rc = -ENOMEM;
		goto out;
	}

	dev_set_drvdata(dev, nvdimm);
	cxl_nvd->bridge = cxl_nvb;
	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
};

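/*
 * ND_CMD_GET_CONFIG_SIZE: report the size of the label storage area and the
 * largest transfer that still fits in a single Set LSA mailbox payload.
 */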
static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_size *cmd,
				    unsigned int buf_len)
{
	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	*cmd = (struct nd_cmd_get_config_size) {
		.config_size = cxlds->lsa_size,
		.max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
	};

	return 0;
}

static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_data_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_get_lsa get_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;
	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
		return -EINVAL;

	get_lsa = (struct cxl_mbox_get_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
		.length = cpu_to_le32(cmd->in_length),
	};

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
			       sizeof(get_lsa), cmd->out_buf, cmd->in_length);
	cmd->status = 0;

	return rc;
}

static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_set_config_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_set_lsa *set_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	/* 4-byte status follows the input data in the payload */
	if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len)
		return -EINVAL;

	set_lsa =
		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
	if (!set_lsa)
		return -ENOMEM;

	*set_lsa = (struct cxl_mbox_set_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
	};
	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
			       struct_size(set_lsa, data, cmd->in_length),
			       NULL, 0);

	/*
	 * Set "firmware" status (4 packed bytes at the end of the input
	 * payload).
	 */
	put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
	kvfree(set_lsa);

	return rc;
}

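/* Route nvdimm ioctls to the LSA mailbox handlers above */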
static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
			       void *buf, unsigned int buf_len)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		return cxl_pmem_get_config_size(cxlds, buf, buf_len);
	case ND_CMD_GET_CONFIG_DATA:
		return cxl_pmem_get_config_data(cxlds, buf, buf_len);
	case ND_CMD_SET_CONFIG_DATA:
		return cxl_pmem_set_config_data(cxlds, buf, buf_len);
	default:
		return -ENOTTY;
	}
}

static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	/*
	 * No firmware response to translate, let the transport error
	 * code take precedence.
	 */
	*cmd_rc = 0;

	if (!nvdimm)
		return -ENOTTY;
	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}

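/* Register the bridge's nvdimm_bus on first online, a no-op if it exists */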
static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
{
	if (cxl_nvb->nvdimm_bus)
		return true;
	cxl_nvb->nvdimm_bus =
		nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
	return cxl_nvb->nvdimm_bus != NULL;
}

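/*
 * bus_for_each_dev() callbacks used by offline_nvdimm_bus() to unbind only
 * the cxl_nvdimm and cxl_pmem_region devices associated with the departing
 * bridge.
 */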
static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
	struct cxl_nvdimm *cxl_nvd;

	if (!is_cxl_nvdimm(dev))
		return 0;

	cxl_nvd = to_cxl_nvdimm(dev);
	if (cxl_nvd->bridge != cxl_nvb)
		return 0;

	device_release_driver(dev);
	return 0;
}

static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
{
	struct cxl_pmem_region *cxlr_pmem;

	if (!is_cxl_pmem_region(dev))
		return 0;

	cxlr_pmem = to_cxl_pmem_region(dev);
	if (cxlr_pmem->bridge != cxl_nvb)
		return 0;

	device_release_driver(dev);
	return 0;
}

static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
			       struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;

	/*
	 * Set the state of cxl_nvdimm devices to unbound / idle before
	 * nvdimm_bus_unregister() rips the nvdimm objects out from
	 * underneath them.
	 */
	bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
			 cxl_pmem_region_release_driver);
	bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
			 cxl_nvdimm_release_driver);
	nvdimm_bus_unregister(nvdimm_bus);
}

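/*
 * Bridge state machine, run from cxl_pmem_wq: bring the nvdimm_bus online
 * and rescan the CXL bus, or tear the bus down when the bridge goes
 * offline / dead.
 */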
static void cxl_nvb_update_state(struct work_struct *work)
{
	struct cxl_nvdimm_bridge *cxl_nvb =
		container_of(work, typeof(*cxl_nvb), state_work);
	struct nvdimm_bus *victim_bus = NULL;
	bool release = false, rescan = false;

	device_lock(&cxl_nvb->dev);
	switch (cxl_nvb->state) {
	case CXL_NVB_ONLINE:
		if (!online_nvdimm_bus(cxl_nvb)) {
			dev_err(&cxl_nvb->dev,
				"failed to establish nvdimm bus\n");
			release = true;
		} else
			rescan = true;
		break;
	case CXL_NVB_OFFLINE:
	case CXL_NVB_DEAD:
		victim_bus = cxl_nvb->nvdimm_bus;
		cxl_nvb->nvdimm_bus = NULL;
		break;
	default:
		break;
	}
	device_unlock(&cxl_nvb->dev);

	if (release)
		device_release_driver(&cxl_nvb->dev);
	if (rescan) {
		int rc = bus_rescan_devices(&cxl_bus_type);

		dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
	}
	offline_nvdimm_bus(cxl_nvb, victim_bus);

	put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
{
	/*
	 * Take a reference that the workqueue will drop if new work
	 * gets queued.
	 */
	get_device(&cxl_nvb->dev);
	if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_remove(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_ONLINE)
		cxl_nvb->state = CXL_NVB_OFFLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);
}

static int cxl_nvdimm_bridge_probe(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_DEAD)
		return -ENXIO;

	if (cxl_nvb->state == CXL_NVB_NEW) {
		cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
			.provider_name = "CXL",
			.module = THIS_MODULE,
			.ndctl = cxl_pmem_ctl,
		};

		INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
	}

	cxl_nvb->state = CXL_NVB_ONLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);

	return 0;
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.remove = cxl_nvdimm_bridge_remove,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
};

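/*
 * Helpers for cxl_pmem_region_probe(): match_cxl_nvdimm() finds the
 * cxl_nvdimm child of a memdev, and the add/del region helpers pin a region
 * in each dimm's pmem_regions xarray so region and nvdimm teardown can
 * proceed in either order.
 */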
static int match_cxl_nvdimm(struct device *dev, void *data)
{
	return is_cxl_nvdimm(dev);
}

static void unregister_nvdimm_region(void *nd_region)
{
	nvdimm_region_delete(nd_region);
}

static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd,
				 struct cxl_pmem_region *cxlr_pmem)
{
	int rc;

	rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem,
		       cxlr_pmem, GFP_KERNEL);
	if (rc)
		return rc;

	get_device(&cxlr_pmem->dev);
	return 0;
}

static void cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd,
				  struct cxl_pmem_region *cxlr_pmem)
{
	/*
	 * It is possible this is called without a corresponding
	 * cxl_nvdimm_add_region for @cxlr_pmem
	 */
	cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem);
	if (cxlr_pmem)
		put_device(&cxlr_pmem->dev);
}

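/* devm undo action: drop the per-dimm region pins taken during probe */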
static void release_mappings(void *data)
{
	int i;
	struct cxl_pmem_region *cxlr_pmem = data;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge;

	device_lock(&cxl_nvb->dev);
	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;

		cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem);
	}
	device_unlock(&cxl_nvb->dev);
}

static void cxlr_pmem_remove_resource(void *res)
{
	remove_resource(res);
}

struct cxl_pmem_region_info {
	u64 offset;
	u64 serial;
};

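/*
 * Translate a cxl_pmem_region into an nd_region: claim the HPA range as a
 * "Persistent Memory" resource, build one nd_mapping_desc per endpoint
 * memdev, and derive the interleave-set cookie from a fletcher64 of the
 * per-mapping (offset, serial) pairs.
 */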
static int cxl_pmem_region_probe(struct device *dev)
{
	struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_pmem_region_info *info = NULL;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nd_interleave_set *nd_set;
	struct nd_region_desc ndr_desc;
	struct cxl_nvdimm *cxl_nvd;
	struct nvdimm *nvdimm;
	struct resource *res;
	int rc, i = 0;

	cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
	if (!cxl_nvb) {
		dev_dbg(dev, "bridge not found\n");
		return -ENXIO;
	}
	cxlr_pmem->bridge = cxl_nvb;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		dev_dbg(dev, "nvdimm bus not found\n");
		rc = -ENXIO;
		goto out_nvb;
	}

	memset(&mappings, 0, sizeof(mappings));
	memset(&ndr_desc, 0, sizeof(ndr_desc));

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	res->name = "Persistent Memory";
	res->start = cxlr_pmem->hpa_range.start;
	res->end = cxlr_pmem->hpa_range.end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	rc = insert_resource(&iomem_resource, res);
	if (rc)
		goto out_nvb;

	rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
	if (rc)
		goto out_nvb;

	ndr_desc.res = res;
	ndr_desc.provider_data = cxlr_pmem;

	ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
	ndr_desc.target_node = phys_to_target_node(res->start);
	if (ndr_desc.target_node == NUMA_NO_NODE) {
		ndr_desc.target_node = ndr_desc.numa_node;
		dev_dbg(&cxlr->dev, "changing target node from %d to %d",
			NUMA_NO_NODE, ndr_desc.target_node);
	}

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	ndr_desc.memregion = cxlr->id;
	set_bit(ND_REGION_CXL, &ndr_desc.flags);
	set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);

	info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem);
	if (rc)
		goto out_nvd;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_memdev *cxlmd = m->cxlmd;
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct device *d;

		d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
		if (!d) {
			dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		/* safe to drop ref now with bridge lock held */
		put_device(d);

		cxl_nvd = to_cxl_nvdimm(d);
		nvdimm = dev_get_drvdata(&cxl_nvd->dev);
		if (!nvdimm) {
			dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		/*
		 * Pin the region per nvdimm device as those may be released
		 * out-of-order with respect to the region, and a single nvdimm
		 * may be associated with multiple regions
		 */
		rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem);
		if (rc)
			goto out_nvd;

		m->cxl_nvd = cxl_nvd;
		mappings[i] = (struct nd_mapping_desc) {
			.nvdimm = nvdimm,
			.start = m->start,
			.size = m->size,
			.position = i,
		};
		info[i].offset = m->start;
		info[i].serial = cxlds->serial;
	}

	ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
	ndr_desc.mapping = mappings;

	/*
	 * TODO enable CXL labels which skip the need for 'interleave-set cookie'
	 */
	nd_set->cookie1 =
		nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
	nd_set->cookie2 = nd_set->cookie1;
	ndr_desc.nd_set = nd_set;

	cxlr_pmem->nd_region =
		nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
	if (!cxlr_pmem->nd_region) {
		rc = -ENOMEM;
		goto out_nvd;
	}

	rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
				      cxlr_pmem->nd_region);
out_nvd:
	kfree(info);
out_nvb:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_pmem_region_driver = {
	.name = "cxl_pmem_region",
	.probe = cxl_pmem_region_probe,
	.id = CXL_DEVICE_PMEM_REGION,
};

/*
 * Return all bridges to the CXL_NVB_NEW state to invalidate any
 * ->state_work referring to the now destroyed cxl_pmem_wq.
 */
static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
{
	struct cxl_nvdimm_bridge *cxl_nvb;

	if (!is_cxl_nvdimm_bridge(dev))
		return 0;

	cxl_nvb = to_cxl_nvdimm_bridge(dev);
	device_lock(dev);
	cxl_nvb->state = CXL_NVB_NEW;
	device_unlock(dev);

	return 0;
}

static void destroy_cxl_pmem_wq(void)
{
	destroy_workqueue(cxl_pmem_wq);
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
}

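/*
 * Module init: populate the exclusive-command bitmap (set-shutdown-state and
 * set-LSA), create the ordered workqueue, and register the bridge, nvdimm,
 * and pmem-region drivers; registration is unwound in reverse order on
 * failure.
 */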
static __init int cxl_pmem_init(void)
{
	int rc;

	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
	if (!cxl_pmem_wq)
		return -ENXIO;

	rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
	if (rc)
		goto err_bridge;

	rc = cxl_driver_register(&cxl_nvdimm_driver);
	if (rc)
		goto err_nvdimm;

	rc = cxl_driver_register(&cxl_pmem_region_driver);
	if (rc)
		goto err_region;

	return 0;

err_region:
	cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
	destroy_cxl_pmem_wq();
	return rc;
}

static __exit void cxl_pmem_exit(void)
{
	cxl_driver_unregister(&cxl_pmem_region_driver);
	cxl_driver_unregister(&cxl_nvdimm_driver);
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
	destroy_cxl_pmem_wq();
}

MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);