// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL-aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL-aware drivers must
 * register with the CXL core via these interfaces in order to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

static DEFINE_IDA(cxl_port_ida);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->range.start);
}
static DEVICE_ATTR_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_ACCELERATOR:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_EXPANDER:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t offset = 0;
	int i, rc = 0;

	device_lock(dev);
	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxld->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;
		if (i + 1 < cxld->interleave_ways)
			next = cxld->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			break;
		offset += rc;
	}
	device_unlock(dev);

	if (rc < 0)
		return rc;
	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;
	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);
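
/*
 * Example: for a 4-way interleaved decoder, reading the target_list
 * attribute emits the dport ids in interleave order, comma separated
 * (the decoder name below is illustrative):
 *
 *	$ cat /sys/bus/cxl/devices/decoder0.0/target_list
 *	0,1,2,3
 */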

static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_target_list.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	NULL,
};

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static void cxl_decoder_release(struct device *dev)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);

	ida_free(&port->decoder_ida, cxld->id);
	kfree(cxld);
}

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_GPL(is_root_decoder);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_decoder);

static void cxl_dport_release(struct cxl_dport *dport)
{
	list_del(&dport->list);
	put_device(dport->dport);
	kfree(dport);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	struct cxl_dport *dport, *_d;

	device_lock(dev);
	list_for_each_entry_safe(dport, _d, &port->dports, list)
		cxl_dport_release(dport);
	device_unlock(dev);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

struct cxl_port *to_cxl_port(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_dport *dport;

	device_lock(&port->dev);
	list_for_each_entry(dport, &port->dports, list) {
		char link_name[CXL_TARGET_STRLEN];

		if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
			     dport->port_id) >= CXL_TARGET_STRLEN)
			continue;
		sysfs_remove_link(&port->dev.kobj, link_name);
	}
	device_unlock(&port->dev);
	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_port *parent_port)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_port)
		dev->parent = &parent_port->dev;
	else
		dev->parent = uport;

	port->uport = uport;
	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	INIT_LIST_HEAD(&port->dports);

	device_initialize(dev);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_port: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_port *parent_port)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_port);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (parent_port)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_port);
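
/*
 * Usage sketch (hypothetical caller): a platform driver that discovered a
 * CXL host bridge might build the decode hierarchy roughly as:
 *
 *	root = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	port = devm_cxl_add_port(host, bridge_dev, component_reg_phys, root);
 *
 * "bridge_dev" and "component_reg_phys" stand in for values the caller
 * derives from platform firmware.
 */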

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;

	device_lock_assert(&port->dev);
	list_for_each_entry(dport, &port->dports, list)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;

	device_lock(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup)
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
	else
		list_add_tail(&new->list, &port->dports);
	device_unlock(&port->dev);

	return dup ? -EEXIST : 0;
}

/**
 * cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that all allocations and links are undone by cxl_port deletion
 * and release.
 */
int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
		  resource_size_t component_reg_phys)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	int rc;

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return -EINVAL;

	dport = kzalloc(sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return -ENOMEM;

	INIT_LIST_HEAD(&dport->list);
	dport->dport = get_device(dport_dev);
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;

	rc = add_dport(port, dport);
	if (rc)
		goto err;

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		goto err;

	return 0;

err:
	cxl_dport_release(dport);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_add_dport);
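
/*
 * Usage sketch (hypothetical values): a host-bridge driver enumerating its
 * downstream ports might register each one as:
 *
 *	rc = cxl_add_dport(port, &pdev->dev, port_id, reg_phys);
 *	if (rc)
 *		return rc;
 *
 * where "port_id" and "reg_phys" are extracted from the device by the
 * caller; the resulting "dport%d" symlink is removed at port teardown.
 */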
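
/*
 * Translate the caller-provided target_map of dport ids into cxl_dport
 * object pointers in the decoder's target list. The port's device lock
 * keeps the dport list stable while the map is resolved.
 */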
static int decoder_populate_targets(struct device *host,
				    struct cxl_decoder *cxld,
				    struct cxl_port *port, int *target_map,
				    int nr_targets)
{
	int rc = 0, i;

	if (!target_map)
		return 0;

	device_lock(&port->dev);
	for (i = 0; i < nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		dev_dbg(host, "%s: target: %d\n", dev_name(dport->dport), i);
		cxld->target[i] = dport;
	}
	device_unlock(&port->dev);

	return rc;
}

static struct cxl_decoder *
cxl_decoder_alloc(struct device *host, struct cxl_port *port, int nr_targets,
		  resource_size_t base, resource_size_t len,
		  int interleave_ways, int interleave_granularity,
		  enum cxl_decoder_type type, unsigned long flags,
		  int *target_map)
{
	struct cxl_decoder *cxld;
	struct device *dev;
	int rc = 0;

	if (interleave_ways < 1)
		return ERR_PTR(-EINVAL);

	device_lock(&port->dev);
	if (list_empty(&port->dports))
		rc = -EINVAL;
	device_unlock(&port->dev);
	if (rc)
		return ERR_PTR(rc);

	cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
	if (!cxld)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;

	*cxld = (struct cxl_decoder) {
		.id = rc,
		.range = {
			.start = base,
			.end = base + len - 1,
		},
		.flags = flags,
		.interleave_ways = interleave_ways,
		.interleave_granularity = interleave_granularity,
		.target_type = type,
	};

	rc = decoder_populate_targets(host, cxld, port, target_map, nr_targets);
	if (rc)
		goto err;

	dev = &cxld->dev;
	device_initialize(dev);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* root ports do not have a cxl_port_type parent */
	if (port->dev.parent->type == &cxl_port_type)
		dev->type = &cxl_decoder_switch_type;
	else
		dev->type = &cxl_decoder_root_type;

	return cxld;

err:
	kfree(cxld);
	return ERR_PTR(rc);
}

struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
		     resource_size_t base, resource_size_t len,
		     int interleave_ways, int interleave_granularity,
		     enum cxl_decoder_type type, unsigned long flags,
		     int *target_map)
{
	struct cxl_decoder *cxld;
	struct device *dev;
	int rc;

	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return ERR_PTR(-EINVAL);

	cxld = cxl_decoder_alloc(host, port, nr_targets, base, len,
				 interleave_ways, interleave_granularity, type,
				 flags, target_map);
	if (IS_ERR(cxld))
		return cxld;

	dev = &cxld->dev;
	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
	if (rc)
		return ERR_PTR(rc);
	return cxld;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single-ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport
 * sees are claimed and passed to the single dport. Default the decoder range
 * to a 0-base, 0-length range until the first CXL region is activated.
 */
struct cxl_decoder *devm_cxl_add_passthrough_decoder(struct device *host,
						     struct cxl_port *port)
{
	struct cxl_dport *dport;
	int target_map[1];

	device_lock(&port->dev);
	dport = list_first_entry_or_null(&port->dports, typeof(*dport), list);
	device_unlock(&port->dev);

	if (!dport)
		return ERR_PTR(-ENXIO);

	target_map[0] = dport->port_id;
	return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE,
				    CXL_DECODER_EXPANDER, 0, target_map);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_passthrough_decoder);
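
/*
 * Usage sketch: for a host bridge with exactly one dport and no HDM
 * decoder capability, a caller would only need:
 *
 *	cxld = devm_cxl_add_passthrough_decoder(host, port);
 *	if (IS_ERR(cxld))
 *		return PTR_ERR(cxld);
 */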

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(__cxl_driver_register);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(cxl_driver_unregister);
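
/*
 * Usage sketch (hypothetical driver): a cxl driver is declared with a
 * name, probe routine, and device id, then registered against the bus.
 * The module_cxl_driver() convenience macro (assumed here) supplies
 * THIS_MODULE and KBUILD_MODNAME:
 *
 *	static int cxl_widget_probe(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cxl_driver cxl_widget_driver = {
 *		.name = "cxl_widget",
 *		.probe = cxl_widget_probe,
 *		.id = CXL_DEVICE_NVDIMM,
 *	};
 *
 *	module_cxl_driver(cxl_widget_driver);
 */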

static int cxl_device_id(struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	return 0;
}

static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	return to_cxl_drv(dev->driver)->probe(dev);
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
};
EXPORT_SYMBOL_GPL(cxl_bus_type);

static __init int cxl_core_init(void)
{
	int rc;

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err;
	return 0;

err:
	cxl_memdev_exit();
	cxl_mbox_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	bus_unregister(&cxl_bus_type);
	cxl_memdev_exit();
	cxl_mbox_exit();
}

module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");