// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * CXL core introduces sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

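/*
 * For orientation, a hypothetical port hierarchy (device names are
 * illustrative, not taken from a real system) nests roughly as:
 *
 *	root0			(platform CXL root, no parent port)
 *	  port1			(host bridge / switch)
 *	    endpoint2		(memory expander)
 *	      decoder2.0	(endpoint decoder)
 *
 * with each object also surfaced under /sys/bus/cxl/devices/.
 */
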
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

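/*
 * For illustration, CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM)
 * above expands to roughly the following (a sketch, not literal
 * preprocessor output):
 *
 *	static ssize_t cap_pmem_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		struct cxl_decoder *cxld = to_cxl_decoder(dev);
 *
 *		return sysfs_emit(buf, "%s\n",
 *				  (cxld->flags & CXL_DECODER_F_PMEM) ? "1" : "0");
 *	}
 *	static DEVICE_ATTR_RO(cap_pmem);
 */
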
static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_ACCELERATOR:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_EXPANDER:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
	struct cxl_decoder *cxld = &cxlsd->cxld;
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxlsd->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		if (i + 1 < cxld->interleave_ways)
			next = cxlsd->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}

static struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);

static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
	ssize_t offset;
	unsigned int seq;
	int rc;

	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		rc = emit_target_list(cxlsd, buf);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	switch (cxled->mode) {
	case CXL_DECODER_RAM:
		return sysfs_emit(buf, "ram\n");
	case CXL_DECODER_PMEM:
		return sysfs_emit(buf, "pmem\n");
	case CXL_DECODER_NONE:
		return sysfs_emit(buf, "none\n");
	case CXL_DECODER_MIXED:
	default:
		return sysfs_emit(buf, "mixed\n");
	}
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	enum cxl_decoder_mode mode;
	ssize_t rc;

	if (sysfs_streq(buf, "pmem"))
		mode = CXL_DECODER_PMEM;
	else if (sysfs_streq(buf, "ram"))
		mode = CXL_DECODER_RAM;
	else
		return -EINVAL;

	rc = cxl_dpa_set_mode(cxled, mode);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	u64 base = cxl_dpa_resource_start(cxled);

	return sysfs_emit(buf, "%#llx\n", base);
}
static DEVICE_ATTR_RO(dpa_resource);

static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	resource_size_t size = cxl_dpa_size(cxled);

	return sysfs_emit(buf, "%pa\n", &size);
}

static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	unsigned long long size;
	ssize_t rc;

	rc = kstrtoull(buf, 0, &size);
	if (rc)
		return rc;

	if (!IS_ALIGNED(size, SZ_256M))
		return -EINVAL;

	rc = cxl_dpa_free(cxled);
	if (rc)
		return rc;

	if (size == 0)
		return len;

	rc = cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(dpa_size);

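/*
 * Example (hypothetical decoder name): dpa_size accepts a 256MB-aligned
 * size and (re)allocates device-physical-address capacity for the
 * endpoint decoder, after a mode has been selected:
 *
 *	# echo ram > /sys/bus/cxl/devices/decoder3.0/mode
 *	# echo 0x10000000 > /sys/bus/cxl/devices/decoder3.0/dpa_size
 *
 * Writing 0 releases any previous allocation.
 */
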
static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	NULL,
};

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_mode.attr,
	&dev_attr_dpa_size.attr,
	&dev_attr_dpa_resource.attr,
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	ida_free(&port->decoder_ida, cxld->id);
	put_device(&port->dev);
}

static void cxl_endpoint_decoder_release(struct device *dev)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	__cxl_decoder_release(&cxled->cxld);
	kfree(cxled);
}

static void cxl_switch_decoder_release(struct device *dev)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

	__cxl_decoder_release(&cxlsd->cxld);
	kfree(cxlsd);
}

struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
			  "not a cxl_root_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);

static void cxl_root_decoder_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
	kfree(cxlrd);
}

static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_endpoint_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_switch_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_root_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

static bool is_switch_decoder(struct device *dev)
{
	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev,
			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

static struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}

static void cxl_ep_release(struct cxl_ep *ep)
{
	if (!ep)
		return;
	list_del(&ep->list);
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	struct cxl_ep *ep, *_e;

	device_lock(dev);
	list_for_each_entry_safe(ep, _e, &port->endpoints, list)
		cxl_ep_release(ep);
	device_unlock(dev);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * CXL root ports and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
	 */
	if (!parent)
		lock_dev = port->uport;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->dead = true;
	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static struct lock_class_key cxl_port_key;

static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;
	port->uport = uport;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_dport) {
		struct cxl_port *parent_port = parent_dport->port;
		struct cxl_port *iter;

		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
		port->parent_dport = parent_dport;

		/*
		 * walk to the host bridge, or the first ancestor that knows
		 * the host bridge
		 */
		iter = port;
		while (!iter->host_bridge &&
		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);
		if (iter->host_bridge)
			port->host_bridge = iter->host_bridge;
		else
			port->host_bridge = iter->uport;
		dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
	} else
		dev->parent = uport;

	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	port->hdm_end = -1;
	INIT_LIST_HEAD(&port->dports);
	INIT_LIST_HEAD(&port->endpoints);

	device_initialize(dev);
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (is_cxl_memdev(uport))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_dport)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);

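/*
 * Hypothetical usage sketch (error handling elided, names illustrative): a
 * platform driver instantiates the root port first, registers a dport on
 * it, and then adds the next-level port via that dport:
 *
 *	root = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	dport = devm_cxl_add_dport(root, bridge_dev, port_id,
 *				   CXL_RESOURCE_NONE);
 *	port = devm_cxl_add_port(host, bridge_dev, component_reg_phys, dport);
 */
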
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport)) {
		struct pci_dev *pdev = to_pci_dev(port->uport);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);

static void unregister_pci_bus(void *uport)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);

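/*
 * Hypothetical caller sketch: a platform host-bridge driver that fronts a
 * PCI root bus with a non-PCI uport device registers the association so
 * cxl_port_to_pci_bus() can resolve it later (host_bridge and root_bus
 * are illustrative names):
 *
 *	rc = devm_cxl_register_pci_bus(host, &host_bridge->dev, root_bus);
 */
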
static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}

/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
static int match_root_child(struct device *dev, const void *match)
{
	const struct device *iter = NULL;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!dev_is_cxl_root_child(dev))
		return 0;

	port = to_cxl_port(dev);
	device_lock(dev);
	list_for_each_entry(dport, &port->dports, list) {
		iter = match;
		while (iter) {
			if (iter == dport->dport)
				goto out;
			iter = iter->parent;
		}
	}
out:
	device_unlock(dev);

	return !!iter;
}

struct cxl_port *find_cxl_root(struct device *dev)
{
	struct device *port_dev;
	struct cxl_port *root;

	port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
	if (!port_dev)
		return NULL;

	root = to_cxl_port(port_dev->parent);
	get_device(&root->dev);
	put_device(port_dev);
	return root;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;

	device_lock_assert(&port->dev);
	list_for_each_entry(dport, &port->dports, list)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;

	device_lock_assert(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup)
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
	else
		list_add_tail(&new->list, &port->dports);

	return dup ? -EEXIST : 0;
}

/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	put_device(dport->dport);
	cond_cxl_root_lock(port);
	list_del(&dport->list);
	cond_cxl_root_unlock(port);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}

/**
 * devm_cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports), or the port itself (for switch
 * ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dport->list);
	dport->dport = dport_dev;
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);

static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev)
{
	struct cxl_ep *ep;

	device_lock_assert(&port->dev);
	list_for_each_entry(ep, &port->endpoints, list)
		if (ep->ep == ep_dev)
			return ep;
	return NULL;
}

static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;
	struct cxl_ep *dup;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	dup = find_ep(port, new->ep);
	if (!dup)
		list_add_tail(&new->list, &port->endpoints);
	device_unlock(&port->dev);

	return dup ? -EEXIST : 0;
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	INIT_LIST_HEAD(&ep->list);
	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}

struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/*
 * All users of grandparent() are using it to walk PCIe-like switch port
 * hierarchy. A PCIe switch is comprised of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports. When
 * bridges stack the grand-parent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

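/*
 * Illustrative stacked-switch walk (device names hypothetical), per the
 * comment above:
 *
 *	dsp_b (downstream port, inner switch)
 *	  parent: usp_b (upstream port, inner switch)
 *	    parent: dsp_a (downstream port, outer switch)
 *
 * so grandparent(&dsp_b->dev) == &dsp_a->dev, skipping the intermediate
 * upstream-port bridge on each hop.
 */
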
static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd, NULL);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	device_lock(parent);
	if (parent->driver && !endpoint->dead) {
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	device_unlock(parent);
	put_device(parent);
out:
	put_device(&endpoint->dev);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *dev = &cxlmd->dev;

	get_device(&endpoint->dev);
	dev_set_drvdata(dev, endpoint);
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order.
 */
static void delete_switch_port(struct cxl_port *port, struct list_head *dports)
{
	struct cxl_dport *dport, *_d;

	list_for_each_entry_safe(dport, _d, dports, list) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}

static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct device *iter;

	for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct cxl_port *port, *parent_port;
		LIST_HEAD(reap_dports);
		struct cxl_ep *ep;

		if (!dport_dev)
			break;

		port = find_cxl_port(dport_dev, NULL);
		if (!port)
			continue;

		if (is_cxl_root(port)) {
			put_device(&port->dev);
			continue;
		}

		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		if (!parent_port->dev.driver) {
			/*
			 * The bottom-up race to delete the port lost to a
			 * top-down port disable, give up here, because the
			 * parent_port->remove() will have cleaned up all
			 * descendants.
			 */
			device_unlock(&parent_port->dev);
			put_device(&port->dev);
			continue;
		}

		device_lock(&port->dev);
		ep = find_ep(port, &cxlmd->dev);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_release(ep);
		if (ep && !port->dead && list_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port)) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			port->dead = true;
			list_splice_init(&port->dports, &reap_dports);
		}
		device_unlock(&port->dev);

		if (!list_empty(&reap_dports)) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port, &reap_dports);
		}
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}

static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device; in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return cxl_regmap_to_base(pdev, &map);
}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev, &dport);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_dport);
		/* retry find to pick up the new dport information */
		if (!IS_ERR(port))
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport));
		rc = cxl_add_ep(dport, &cxlmd->dev);
		if (rc == -EEXIST) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}

int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;
		struct cxl_port *port;

		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));

		port = find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev), dev_name(port->uport));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EEXIST) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);

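/*
 * Worked example (a sketch for a hypothetical single-switch topology): on
 * the first pass the switch's cxl_port does not exist yet, so
 * add_port_attach_ep() returns -EAGAIN and the walk continues up until it
 * reaches an ancestor whose port is already registered (ultimately a child
 * of the root). Each successful port addition restarts the walk ("goto
 * retry") until the endpoint's whole ancestry is instantiated.
 */
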
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{
	return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);

struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
					const struct device *dev)
{
	struct cxl_dport *dport;

	device_lock(&port->dev);
	list_for_each_entry(dport, &port->dports, list)
		if (dport->dport == dev) {
			device_unlock(&port->dev);
			return dport;
		}
	device_unlock(&port->dev);

	return NULL;
}
EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);

static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (list_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxlsd->target_lock);
	for (i = 0; i < cxlsd->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxlsd->target[i] = dport;
	}
	write_sequnlock(&cxlsd->target_lock);

	return rc;
}

static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enables some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add().
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}

static int cxl_switch_decoder_init(struct cxl_port *port,
				   struct cxl_switch_decoder *cxlsd,
				   int nr_targets)
{
	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return -EINVAL;

	cxlsd->nr_targets = nr_targets;
	seqlock_init(&cxlsd->target_lock);
	return cxl_decoder_init(port, &cxlsd->cxld);
}

/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
	if (!cxlsd)
		return ERR_PTR(-ENOMEM);

	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlsd);
		return ERR_PTR(rc);
	}

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_switch_type;
	return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);

2022-02-02 20:02:06 -08:00
/**
* cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
* @ port : owning port of this decoder
*
* Return : A new cxl decoder to be registered by cxl_decoder_add ( )
*/
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	struct cxl_endpoint_decoder *cxled;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
	if (!cxled)
		return ERR_PTR(-ENOMEM);

	cxld = &cxled->cxld;
	rc = cxl_decoder_init(port, cxld);
	if (rc) {
		kfree(cxled);
		return ERR_PTR(rc);
	}

	cxld->dev.type = &cxl_decoder_endpoint_type;

	return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
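
/*
 * Illustrative sketch (not compiled here) of how an endpoint port driver
 * might use cxl_endpoint_decoder_alloc(). 'endpoint' is a hypothetical
 * struct cxl_port; the enumeration logic around it is elided. Endpoint
 * decoders have no downstream targets, so no target_map is needed:
 *
 *	struct cxl_endpoint_decoder *cxled;
 *	int rc;
 *
 *	cxled = cxl_endpoint_decoder_alloc(endpoint);
 *	if (IS_ERR(cxled))
 *		return PTR_ERR(cxled);
 *
 *	rc = cxl_decoder_add(&cxled->cxld, NULL);
 *	if (rc)
 *		put_device(&cxled->cxld.dev);
 */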

/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a hostbridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;
	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;
	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port
 *	    that owns the @cxld.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
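
/*
 * Illustrative usage sketch (not compiled here): after allocating and
 * programming a decoder, a caller typically publishes it and then arranges
 * for automatic unregistration against a host device via
 * cxl_decoder_autoremove() below. 'host' and 'target_map' are hypothetical:
 *
 *	rc = cxl_decoder_add(cxld, target_map);
 *	if (rc)
 *		put_device(&cxld->dev);
 *	else
 *		rc = cxl_decoder_autoremove(host, cxld);
 */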
static void cxld_unregister(void *dev)
{
	device_unregister(dev);
}
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
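
/*
 * Illustrative sketch (not compiled here) of a cxl bus driver definition.
 * The driver and callback names are hypothetical; the cxl_driver_register()
 * wrapper supplies THIS_MODULE and KBUILD_MODNAME to
 * __cxl_driver_register():
 *
 *	static struct cxl_driver example_driver = {
 *		.name = "cxl_example",
 *		.probe = cxl_example_probe,
 *		.remove = cxl_example_remove,
 *		.id = CXL_DEVICE_MEMORY_EXPANDER,
 *	};
 *
 *	static int __init cxl_example_init(void)
 *	{
 *		return cxl_driver_register(&example_driver);
 *	}
 *	module_init(cxl_example_init);
 */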

static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}
static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

static struct workqueue_struct *cxl_bus_wq;

int cxl_bus_rescan(void)
{
	return bus_rescan_devices(&cxl_bus_type);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}
static BUS_ATTR_WO(flush);
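
/*
 * Example from user tooling (shell), to drain any scheduled memdev detach
 * work before inspecting the port topology:
 *
 *	echo 1 > /sys/bus/cxl/flush
 *
 * Any string other than "1" is rejected with -EINVAL.
 */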

static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);

static __init int cxl_core_init(void)
{
	int rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	return 0;

err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}

module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");