// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"
/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */
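/*
 * Example sysfs layout (object names are illustrative; see the
 * devm_cxl_add_port() and cxl_decoder_add_locked() naming schemes below):
 *
 *	/sys/bus/cxl/devices/root0
 *	/sys/bus/cxl/devices/port1
 *	/sys/bus/cxl/devices/endpoint2
 *	/sys/bus/cxl/devices/decoder0.0/target_list
 */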
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};
static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	u64 start;

	if (is_root_decoder(dev))
		start = cxld->platform_res.start;
	else
		start = cxld->decoder_range.start;

	return sysfs_emit(buf, "%#llx\n", start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	u64 size;

	if (is_root_decoder(dev))
		size = resource_size(&cxld->platform_res);
	else
		size = range_len(&cxld->decoder_range);

	return sysfs_emit(buf, "%#llx\n", size);
}
static DEVICE_ATTR_RO(size);
#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                      \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
								     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_ACCELERATOR:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_EXPANDER:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);
static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
{
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxld->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		if (i + 1 < cxld->interleave_ways)
			next = cxld->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}
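/*
 * cxld->target[] is updated under cxld->target_lock, a seqlock, so readers
 * take a lockless snapshot and retry if a target-list update raced in.
 */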
static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t offset;
	unsigned int seq;
	int rc;

	do {
		seq = read_seqbegin(&cxld->target_lock);
		rc = emit_target_list(cxld, buf);
	} while (read_seqretry(&cxld->target_lock, seq));

	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);
static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	NULL,
};

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
static void cxl_decoder_release(struct device *dev)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);

	ida_free(&port->decoder_ida, cxld->id);
	kfree(cxld);
}

static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

static bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}
bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

bool is_cxl_decoder(struct device *dev)
{
	return dev->type->release == cxl_decoder_release;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_decoder, CXL);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
2021-06-09 09:43:29 -07:00
2022-02-04 07:08:40 -08:00
static void cxl_ep_release ( struct cxl_ep * ep )
{
if ( ! ep )
return ;
list_del ( & ep - > list ) ;
put_device ( ep - > ep ) ;
kfree ( ep ) ;
}
2021-06-09 09:01:35 -07:00
static void cxl_port_release ( struct device * dev )
{
struct cxl_port * port = to_cxl_port ( dev ) ;
2022-02-04 07:08:40 -08:00
struct cxl_ep * ep , * _e ;
2021-06-09 09:01:35 -07:00
2022-02-04 07:08:40 -08:00
cxl_device_lock ( dev ) ;
list_for_each_entry_safe ( ep , _e , & port - > endpoints , list )
cxl_ep_release ( ep ) ;
cxl_device_unlock ( dev ) ;
2021-06-09 09:01:35 -07:00
ida_free ( & cxl_port_ida , port - > id ) ;
kfree ( port ) ;
}
static const struct attribute_group * cxl_port_attribute_groups [ ] = {
& cxl_base_attribute_group ,
NULL ,
} ;
static const struct device_type cxl_port_type = {
. name = " cxl_port " ,
. release = cxl_port_release ,
. groups = cxl_port_attribute_groups ,
} ;
bool is_cxl_port(struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);
static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;

	if (!is_cxl_root(port)) {
		device_lock_assert(port->dev.parent);
		port->uport = NULL;
	}

	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}
static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_port *parent_port)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_port)
		dev->parent = &parent_port->dev;
	else
		dev->parent = uport;

	port->uport = uport;
	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	INIT_LIST_HEAD(&port->dports);
	INIT_LIST_HEAD(&port->endpoints);

	device_initialize(dev);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}
/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_port: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_port *parent_port)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_port);
	if (IS_ERR(port))
		return port;

	if (parent_port)
		port->depth = parent_port->depth + 1;
	dev = &port->dev;
	if (is_cxl_memdev(uport))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_port)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport)) {
		struct pci_dev *pdev = to_pci_dev(port->uport);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);
static void unregister_pci_bus(void *uport)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport);
}
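/*
 * Record the PCI root bus hosted by a platform-firmware described (non-PCI)
 * root-port device so that cxl_port_to_pci_bus() can later resolve it.
 */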
int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}

/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
static int match_root_child(struct device *dev, const void *match)
{
	const struct device *iter = NULL;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!dev_is_cxl_root_child(dev))
		return 0;

	port = to_cxl_port(dev);
	cxl_device_lock(dev);
	list_for_each_entry(dport, &port->dports, list) {
		iter = match;
		while (iter) {
			if (iter == dport->dport)
				goto out;
			iter = iter->parent;
		}
	}
out:
	cxl_device_unlock(dev);

	return !!iter;
}
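/**
 * find_cxl_root - locate the registered platform CXL root port
 * @dev: a device with a CXL port / dport in its ancestry
 *
 * Returns the root cxl_port with an elevated reference count (drop with
 * put_device()), or NULL if no root-child port claims @dev as a descendant.
 */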
struct cxl_port *find_cxl_root(struct device *dev)
{
	struct device *port_dev;
	struct cxl_port *root;

	port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
	if (!port_dev)
		return NULL;

	root = to_cxl_port(port_dev->parent);
	get_device(&root->dev);
	put_device(port_dev);
	return root;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;

	device_lock_assert(&port->dev);
	list_for_each_entry(dport, &port->dports, list)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;

	device_lock_assert(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup)
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
	else
		list_add_tail(&new->list, &port->dports);

	return dup ? -EEXIST : 0;
}
/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		cxl_device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		cxl_device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	put_device(dport->dport);
	cond_cxl_root_lock(port);
	list_del(&dport->list);
	cond_cxl_root_unlock(port);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}
/**
 * devm_cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release action list of
 * either the port's host (for root ports) or the port itself (for
 * switch ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dport->list);
	dport->dport = dport_dev;
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev)
{
	struct cxl_ep *ep;

	device_lock_assert(&port->dev);
	list_for_each_entry(ep, &port->endpoints, list)
		if (ep->ep == ep_dev)
			return ep;
	return NULL;
}

static int add_ep(struct cxl_port *port, struct cxl_ep *new)
{
	struct cxl_ep *dup;

	cxl_device_lock(&port->dev);
	if (port->dead) {
		cxl_device_unlock(&port->dev);
		return -ENXIO;
	}
	dup = find_ep(port, new->ep);
	if (!dup)
		list_add_tail(&new->list, &port->endpoints);
	cxl_device_unlock(&port->dev);

	return dup ? -EEXIST : 0;
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @port: a port in the endpoint's topology ancestry
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	INIT_LIST_HEAD(&ep->list);
	ep->ep = get_device(ep_dev);

	rc = add_ep(port, ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}
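/*
 * Port lookup helpers: find the cxl_port, optionally constrained to a child
 * of @parent_port, that has registered @dport_dev as one of its dports.
 */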
struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	return cxl_find_dport_by_dev(port, ctx->dport_dev) != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
	};

	return __find_cxl_port(&ctx);
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
	};

	return __find_cxl_port(&ctx);
}

/*
 * All users of grandparent() are using it to walk PCIe-like switch port
 * hierarchy. A PCIe switch is comprised of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports.
 * When bridges stack, the grandparent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}
static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd);
	if (!parent_port)
		return;
	parent = &parent_port->dev;

	cxl_device_lock(parent);
	if (parent->driver && endpoint->uport) {
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	cxl_device_unlock(parent);
	put_device(parent);
	put_device(&endpoint->dev);
}
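/**
 * cxl_endpoint_autoremove - arrange to unregister an endpoint port
 * @cxlmd: memdev that registered @endpoint
 * @endpoint: endpoint cxl_port to be torn down when @cxlmd goes away
 */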
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *dev = &cxlmd->dev;

	get_device(&endpoint->dev);
	dev_set_drvdata(dev, endpoint);
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order.
 */
static void delete_switch_port(struct cxl_port *port, struct list_head *dports)
{
	struct cxl_dport *dport, *_d;

	list_for_each_entry_safe(dport, _d, dports, list) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}
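/*
 * Walk up from @cxlmd dropping its endpoint registration at each ancestor
 * port. A port that loses its last endpoint (and is not a direct child of
 * the root) is marked dead and garbage collected along with its dports.
 */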
static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct device *iter;

	for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct cxl_port *port, *parent_port;
		LIST_HEAD(reap_dports);
		struct cxl_ep *ep;

		if (!dport_dev)
			break;

		port = find_cxl_port(dport_dev);
		if (!port)
			continue;

		if (is_cxl_root(port)) {
			put_device(&port->dev);
			continue;
		}

		parent_port = to_cxl_port(port->dev.parent);
		cxl_device_lock(&parent_port->dev);
		if (!parent_port->dev.driver) {
			/*
			 * The bottom-up race to delete the port lost to a
			 * top-down port disable, give up here, because the
			 * parent_port ->remove() will have cleaned up all
			 * descendants.
			 */
			cxl_device_unlock(&parent_port->dev);
			put_device(&port->dev);
			continue;
		}

		cxl_device_lock(&port->dev);
		ep = find_ep(port, &cxlmd->dev);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_release(ep);
		if (ep && !port->dead && list_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port)) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			port->dead = true;
			list_splice_init(&port->dports, &reap_dports);
		}
		cxl_device_unlock(&port->dev);

		if (!list_empty(&reap_dports)) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port, &reap_dports);
		}
		put_device(&port->dev);
		cxl_device_unlock(&parent_port->dev);
	}
}
static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device; in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return cxl_regmap_to_base(pdev, &map);
}
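/*
 * Instantiate (if necessary) the cxl_port for @dport_dev under @uport_dev's
 * port and register @cxlmd's interest in it. Returns -EAGAIN when the parent
 * port itself has yet to be enumerated.
 */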
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	cxl_device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_port);
		if (!IS_ERR(port))
			get_device(&port->dev);
	}
out:
	cxl_device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport));
		rc = cxl_add_ep(port, &cxlmd->dev);
		if (rc == -EEXIST) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}
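/**
 * devm_cxl_enumerate_ports - add any missing cxl_port objects found in the
 *			      CXL.mem topology between @cxlmd and the root
 * @cxlmd: memdev whose port ancestry needs to be enumerated
 */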
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_port *port;

		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev), dev_name(port->uport));
			rc = cxl_add_ep(port, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EEXIST) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd)
{
	return find_cxl_port(grandparent(&cxlmd->dev));
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
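/**
 * cxl_find_dport_by_dev - resolve a dport device to its cxl_dport instance
 * @port: cxl_port whose dport list to search
 * @dev: device registered as a dport of @port
 */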
struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
					const struct device *dev)
{
	struct cxl_dport *dport;

	cxl_device_lock(&port->dev);
	list_for_each_entry(dport, &port->dports, list)
		if (dport->dport == dev) {
			cxl_device_unlock(&port->dev);
			return dport;
		}
	cxl_device_unlock(&port->dev);

	return NULL;
}
EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);
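/*
 * Translate the port_ids in @target_map into dports of @port and record them
 * in the decoder's target list under the decoder's target seqlock.
 */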
static int decoder_populate_targets(struct cxl_decoder *cxld,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (list_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxld->target_lock);
	for (i = 0; i < cxld->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxld->target[i] = dport;
	}
	write_sequnlock(&cxld->target_lock);

	return rc;
}
/**
 * cxl_decoder_alloc - Allocate a new CXL decoder
 * @port: owning port of this decoder
 * @nr_targets: downstream targets accessible by this decoder. All upstream
 *		ports and root ports must have at least 1 target. Endpoint
 *		devices will have 0 targets. Callers wishing to register an
 *		endpoint device should specify 0.
 *
 * A port should contain one or more decoders. Each of those decoders enables
 * some address space for CXL.mem utilization. A decoder is expected to be
 * configured by the caller before registering.
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). The decoder
 *	   is initialized to be a "passthrough" decoder.
 */
static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port,
					     unsigned int nr_targets)
{
	struct cxl_decoder *cxld;
	struct device *dev;
	int rc = 0;

	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return ERR_PTR(-EINVAL);

	cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
	if (!cxld)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxld->id = rc;

	cxld->nr_targets = nr_targets;
	seqlock_init(&cxld->target_lock);
	dev = &cxld->dev;
	device_initialize(dev);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;
	if (is_cxl_root(port))
		cxld->dev.type = &cxl_decoder_root_type;
	else if (is_cxl_endpoint(port))
		cxld->dev.type = &cxl_decoder_endpoint_type;
	else
		cxld->dev.type = &cxl_decoder_switch_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0);

	return cxld;
err:
	kfree(cxld);
	return ERR_PTR(rc);
}
/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
					   unsigned int nr_targets)
{
	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	return cxl_decoder_alloc(port, nr_targets);
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
					     unsigned int nr_targets)
{
	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	return cxl_decoder_alloc(port, nr_targets);
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);

/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	return cxl_decoder_alloc(port, 0);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *		traffic to. These numbers should correspond with the port number
 *		in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a hostbridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		rc = decoder_populate_targets(cxld, port, target_map);
		if (rc)
			return rc;
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	/*
	 * Platform decoder resources should show up with a reasonable name. All
	 * other resources are just sub ranges within the main decoder resource.
	 */
	if (is_root_decoder(dev))
		cxld->platform_res.name = dev_name(dev);

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *		traffic to. These numbers should correspond with the port number
 *		in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port
 *	    that owns the @cxld.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	cxl_device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	cxl_device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
static void cxld_unregister(void *dev)
{
	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
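/*
 * Drivers typically use the cxl_driver_register() / module_cxl_driver()
 * wrappers from cxl.h rather than calling this directly. A hypothetical
 * example registration (cxl_foo names are illustrative only):
 *
 *	static struct cxl_driver cxl_foo_driver = {
 *		.name = "cxl_foo",
 *		.probe = cxl_foo_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(cxl_foo_driver);
 */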
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);

static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	int rc;

	/*
	 * Take the CXL nested lock since the driver core only holds
	 * @dev->mutex and not @dev->lockdep_mutex.
	 */
	cxl_nested_lock(dev);
	rc = to_cxl_drv(dev->driver)->probe(dev);
	cxl_nested_unlock(dev);

	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	cxl_nested_lock(dev);
	if (cxl_drv->remove)
		cxl_drv->remove(dev);
	cxl_nested_unlock(dev);
}
static struct workqueue_struct *cxl_bus_wq;

int cxl_bus_rescan(void)
{
	return bus_rescan_devices(&cxl_bus_type);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}
static BUS_ATTR_WO(flush);

static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};
struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
static __init int cxl_core_init(void)
{
	int rc;

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	return 0;

err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	cxl_mbox_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	cxl_mbox_exit();
}

module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");