// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include "cxl.h"
/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
#define CFMWS_INTERLEAVE_WAYS(x)	(1 << (x)->interleave_ways)
#define CFMWS_INTERLEAVE_GRANULARITY(x)	((x)->granularity + 8)
/*
 * Translate the CFMWS window restrictions field into the decoder flag
 * bits (CXL_DECODER_F_*) used by the CXL core.
 */
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
	unsigned long flags = 0;

	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
		flags |= CXL_DECODER_F_TYPE2;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
		flags |= CXL_DECODER_F_TYPE3;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
		flags |= CXL_DECODER_F_RAM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
		flags |= CXL_DECODER_F_PMEM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
		flags |= CXL_DECODER_F_LOCK;

	return flags;
}
static int cxl_acpi_cfmws_verify ( struct device * dev ,
struct acpi_cedt_cfmws * cfmws )
{
int expected_len ;
if ( cfmws - > interleave_arithmetic ! = ACPI_CEDT_CFMWS_ARITHMETIC_MODULO ) {
dev_err ( dev , " CFMWS Unsupported Interleave Arithmetic \n " ) ;
return - EINVAL ;
}
if ( ! IS_ALIGNED ( cfmws - > base_hpa , SZ_256M ) ) {
dev_err ( dev , " CFMWS Base HPA not 256MB aligned \n " ) ;
return - EINVAL ;
}
if ( ! IS_ALIGNED ( cfmws - > window_size , SZ_256M ) ) {
dev_err ( dev , " CFMWS Window Size not 256MB aligned \n " ) ;
return - EINVAL ;
}
cxl/bus: Populate the target list at decoder create
As found by cxl_test, the implementation populated the target_list for
the single dport exceptional case, it missed populating the target_list
for the typical multi-dport case. Root decoders always know their target
list at the beginning of time, and even switch-level decoders should
have a target list of one or more zeros by default, depending on the
interleave-ways setting.
Walk the hosting port's dport list and populate based on the passed in
map.
Move devm_cxl_add_passthrough_decoder() out of line now that it does the
work of generating a target_map.
Before:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0
After:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0,1,2,3
0
0,1,2,3
Where root2 is a CXL topology root object generated by 'cxl_test'.
Acked-by: Ben Widawsky <ben.widawsky@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/163116439000.2460985.11713777051267946018.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2021-09-09 08:13:10 +03:00
if ( CFMWS_INTERLEAVE_WAYS ( cfmws ) > CXL_DECODER_MAX_INTERLEAVE ) {
dev_err ( dev , " CFMWS Interleave Ways (%d) too large \n " ,
CFMWS_INTERLEAVE_WAYS ( cfmws ) ) ;
return - EINVAL ;
}
2021-06-18 02:12:16 +03:00
expected_len = struct_size ( ( cfmws ) , interleave_targets ,
CFMWS_INTERLEAVE_WAYS ( cfmws ) ) ;
if ( cfmws - > header . length < expected_len ) {
dev_err ( dev , " CFMWS length %d less than expected %d \n " ,
cfmws - > header . length , expected_len ) ;
return - EINVAL ;
}
if ( cfmws - > header . length > expected_len )
dev_dbg ( dev , " CFMWS length %d greater than expected %d \n " ,
cfmws - > header . length , expected_len ) ;
return 0 ;
}
/* Context handed to the CFMWS sub-table walker (cxl_parse_cfmws) */
struct cxl_cfmws_context {
	struct device *dev;		/* logging device (the platform host) */
	struct cxl_port *root_port;	/* root port to hang decoders off of */
};
static int cxl_parse_cfmws ( union acpi_subtable_headers * header , void * arg ,
const unsigned long end )
2021-06-18 02:12:16 +03:00
{
cxl/bus: Populate the target list at decoder create
As found by cxl_test, the implementation populated the target_list for
the single dport exceptional case, it missed populating the target_list
for the typical multi-dport case. Root decoders always know their target
list at the beginning of time, and even switch-level decoders should
have a target list of one or more zeros by default, depending on the
interleave-ways setting.
Walk the hosting port's dport list and populate based on the passed in
map.
Move devm_cxl_add_passthrough_decoder() out of line now that it does the
work of generating a target_map.
Before:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0
After:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0,1,2,3
0
0,1,2,3
Where root2 is a CXL topology root object generated by 'cxl_test'.
Acked-by: Ben Widawsky <ben.widawsky@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/163116439000.2460985.11713777051267946018.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2021-09-09 08:13:10 +03:00
int target_map [ CXL_DECODER_MAX_INTERLEAVE ] ;
2021-10-29 22:51:48 +03:00
struct cxl_cfmws_context * ctx = arg ;
struct cxl_port * root_port = ctx - > root_port ;
struct device * dev = ctx - > dev ;
2021-06-18 02:12:16 +03:00
struct acpi_cedt_cfmws * cfmws ;
struct cxl_decoder * cxld ;
2021-10-29 22:51:48 +03:00
int rc , i ;
2021-06-18 02:12:16 +03:00
2021-10-29 22:51:48 +03:00
cfmws = ( struct acpi_cedt_cfmws * ) header ;
2021-06-18 02:12:16 +03:00
2021-10-29 22:51:48 +03:00
rc = cxl_acpi_cfmws_verify ( dev , cfmws ) ;
if ( rc ) {
dev_err ( dev , " CFMWS range %#llx-%#llx not registered \n " ,
cfmws - > base_hpa ,
2021-09-21 22:22:16 +03:00
cfmws - > base_hpa + cfmws - > window_size - 1 ) ;
2021-10-29 22:51:48 +03:00
return 0 ;
2021-06-18 02:12:16 +03:00
}
2021-06-18 02:12:15 +03:00
2021-10-29 22:51:48 +03:00
for ( i = 0 ; i < CFMWS_INTERLEAVE_WAYS ( cfmws ) ; i + + )
target_map [ i ] = cfmws - > interleave_targets [ i ] ;
2021-06-18 02:12:15 +03:00
2021-10-29 22:51:48 +03:00
cxld = cxl_decoder_alloc ( root_port , CFMWS_INTERLEAVE_WAYS ( cfmws ) ) ;
if ( IS_ERR ( cxld ) )
return 0 ;
2021-06-18 02:12:15 +03:00
2021-10-29 22:51:48 +03:00
cxld - > flags = cfmws_to_decoder_flags ( cfmws - > restrictions ) ;
cxld - > target_type = CXL_DECODER_EXPANDER ;
cxld - > range = ( struct range ) {
. start = cfmws - > base_hpa ,
. end = cfmws - > base_hpa + cfmws - > window_size - 1 ,
} ;
cxld - > interleave_ways = CFMWS_INTERLEAVE_WAYS ( cfmws ) ;
cxld - > interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY ( cfmws ) ;
2021-06-18 02:12:15 +03:00
2021-10-29 22:51:48 +03:00
rc = cxl_decoder_add ( cxld , target_map ) ;
if ( rc )
put_device ( & cxld - > dev ) ;
else
rc = cxl_decoder_autoremove ( dev , cxld ) ;
if ( rc ) {
dev_err ( dev , " Failed to add decoder for %#llx-%#llx \n " ,
cfmws - > base_hpa ,
cfmws - > base_hpa + cfmws - > window_size - 1 ) ;
return 0 ;
2021-06-18 02:12:15 +03:00
}
2021-10-29 22:51:59 +03:00
dev_dbg ( dev , " add: %s node: %d range %#llx-%#llx \n " ,
dev_name ( & cxld - > dev ) , phys_to_target_node ( cxld - > range . start ) ,
2021-10-29 22:51:48 +03:00
cfmws - > base_hpa , cfmws - > base_hpa + cfmws - > window_size - 1 ) ;
2021-06-18 02:12:15 +03:00
2021-10-29 22:51:48 +03:00
return 0 ;
2021-06-18 02:12:15 +03:00
}
2021-09-14 22:14:22 +03:00
__mock int match_add_root_ports ( struct pci_dev * pdev , void * data )
2021-06-09 19:01:51 +03:00
{
struct cxl_walk_context * ctx = data ;
struct pci_bus * root_bus = ctx - > root ;
struct cxl_port * port = ctx - > port ;
int type = pci_pcie_type ( pdev ) ;
struct device * dev = ctx - > dev ;
u32 lnkcap , port_num ;
int rc ;
if ( pdev - > bus ! = root_bus )
return 0 ;
if ( ! pci_is_pcie ( pdev ) )
return 0 ;
if ( type ! = PCI_EXP_TYPE_ROOT_PORT )
return 0 ;
if ( pci_read_config_dword ( pdev , pci_pcie_cap ( pdev ) + PCI_EXP_LNKCAP ,
& lnkcap ) ! = PCIBIOS_SUCCESSFUL )
return 0 ;
/* TODO walk DVSEC to find component register base */
port_num = FIELD_GET ( PCI_EXP_LNKCAP_PN , lnkcap ) ;
rc = cxl_add_dport ( port , & pdev - > dev , port_num , CXL_RESOURCE_NONE ) ;
if ( rc ) {
ctx - > error = rc ;
return rc ;
}
ctx - > count + + ;
dev_dbg ( dev , " add dport%d: %s \n " , port_num , dev_name ( & pdev - > dev ) ) ;
return 0 ;
}
2021-06-18 02:12:15 +03:00
static struct cxl_dport * find_dport_by_dev ( struct cxl_port * port , struct device * dev )
{
struct cxl_dport * dport ;
device_lock ( & port - > dev ) ;
list_for_each_entry ( dport , & port - > dports , list )
if ( dport - > dport = = dev ) {
device_unlock ( & port - > dev ) ;
return dport ;
}
device_unlock ( & port - > dev ) ;
return NULL ;
}
2021-09-14 22:14:22 +03:00
__mock struct acpi_device * to_cxl_host_bridge ( struct device * host ,
struct device * dev )
2021-06-09 19:01:46 +03:00
{
struct acpi_device * adev = to_acpi_device ( dev ) ;
2021-09-04 05:20:39 +03:00
if ( ! acpi_pci_find_root ( adev - > handle ) )
return NULL ;
2021-06-09 19:01:46 +03:00
if ( strcmp ( acpi_device_hid ( adev ) , " ACPI0016 " ) = = 0 )
return adev ;
return NULL ;
}
2021-06-09 19:01:51 +03:00
/*
* A host bridge is a dport to a CFMWS decode and it is a uport to the
* dport ( PCIe Root Ports ) in the host bridge .
*/
static int add_host_bridge_uport ( struct device * match , void * arg )
{
struct cxl_port * root_port = arg ;
struct device * host = root_port - > dev . parent ;
2021-09-14 22:14:22 +03:00
struct acpi_device * bridge = to_cxl_host_bridge ( host , match ) ;
2021-06-09 19:01:51 +03:00
struct acpi_pci_root * pci_root ;
struct cxl_walk_context ctx ;
2021-09-21 22:22:16 +03:00
int single_port_map [ 1 ] , rc ;
2021-06-09 19:43:29 +03:00
struct cxl_decoder * cxld ;
2021-06-18 02:12:15 +03:00
struct cxl_dport * dport ;
2021-06-09 19:01:51 +03:00
struct cxl_port * port ;
if ( ! bridge )
return 0 ;
2021-06-18 02:12:15 +03:00
dport = find_dport_by_dev ( root_port , match ) ;
if ( ! dport ) {
dev_dbg ( host , " host bridge expected and not found \n " ) ;
2021-10-08 00:34:26 +03:00
return 0 ;
2021-06-18 02:12:15 +03:00
}
port = devm_cxl_add_port ( host , match , dport - > component_reg_phys ,
root_port ) ;
2021-06-09 19:01:51 +03:00
if ( IS_ERR ( port ) )
return PTR_ERR ( port ) ;
dev_dbg ( host , " %s: add: %s \n " , dev_name ( match ) , dev_name ( & port - > dev ) ) ;
2021-09-04 05:20:39 +03:00
/*
* Note that this lookup already succeeded in
* to_cxl_host_bridge ( ) , so no need to check for failure here
*/
pci_root = acpi_pci_find_root ( bridge - > handle ) ;
2021-06-09 19:01:51 +03:00
ctx = ( struct cxl_walk_context ) {
. dev = host ,
. root = pci_root - > bus ,
. port = port ,
} ;
pci_walk_bus ( pci_root - > bus , match_add_root_ports , & ctx ) ;
if ( ctx . count = = 0 )
return - ENODEV ;
2021-06-09 19:43:29 +03:00
if ( ctx . error )
return ctx . error ;
2021-09-21 22:22:16 +03:00
if ( ctx . count > 1 )
return 0 ;
2021-06-09 19:43:29 +03:00
/* TODO: Scan CHBCR for HDM Decoder resources */
/*
2021-09-21 22:22:16 +03:00
* Per the CXL specification ( 8.2 .5 .12 CXL HDM Decoder Capability
* Structure ) single ported host - bridges need not publish a decoder
* capability when a passthrough decode can be assumed , i . e . all
* transactions that the uport sees are claimed and passed to the single
* dport . Disable the range until the first CXL region is enumerated /
* activated .
2021-06-09 19:43:29 +03:00
*/
2021-09-21 22:22:16 +03:00
cxld = cxl_decoder_alloc ( port , 1 ) ;
if ( IS_ERR ( cxld ) )
return PTR_ERR ( cxld ) ;
cxld - > interleave_ways = 1 ;
cxld - > interleave_granularity = PAGE_SIZE ;
cxld - > target_type = CXL_DECODER_EXPANDER ;
cxld - > range = ( struct range ) {
. start = 0 ,
. end = - 1 ,
} ;
2021-06-09 19:43:29 +03:00
2021-09-21 22:22:16 +03:00
device_lock ( & port - > dev ) ;
dport = list_first_entry ( & port - > dports , typeof ( * dport ) , list ) ;
device_unlock ( & port - > dev ) ;
2021-06-09 19:43:29 +03:00
2021-09-21 22:22:16 +03:00
single_port_map [ 0 ] = dport - > port_id ;
rc = cxl_decoder_add ( cxld , single_port_map ) ;
if ( rc )
put_device ( & cxld - > dev ) ;
else
rc = cxl_decoder_autoremove ( host , cxld ) ;
if ( rc = = 0 )
dev_dbg ( host , " add: %s \n " , dev_name ( & cxld - > dev ) ) ;
return rc ;
2021-06-09 19:01:51 +03:00
}
2021-10-29 22:51:48 +03:00
struct cxl_chbs_context {
2021-10-29 22:51:53 +03:00
struct device * dev ;
2021-10-29 22:51:48 +03:00
unsigned long long uid ;
resource_size_t chbcr ;
} ;
static int cxl_get_chbcr ( union acpi_subtable_headers * header , void * arg ,
const unsigned long end )
{
struct cxl_chbs_context * ctx = arg ;
struct acpi_cedt_chbs * chbs ;
if ( ctx - > chbcr )
return 0 ;
chbs = ( struct acpi_cedt_chbs * ) header ;
if ( ctx - > uid ! = chbs - > uid )
return 0 ;
ctx - > chbcr = chbs - > base ;
return 0 ;
}
2021-06-09 19:01:46 +03:00
static int add_host_bridge_dport ( struct device * match , void * arg )
{
int rc ;
acpi_status status ;
unsigned long long uid ;
2021-10-29 22:51:48 +03:00
struct cxl_chbs_context ctx ;
2021-06-09 19:01:46 +03:00
struct cxl_port * root_port = arg ;
struct device * host = root_port - > dev . parent ;
2021-09-14 22:14:22 +03:00
struct acpi_device * bridge = to_cxl_host_bridge ( host , match ) ;
2021-06-09 19:01:46 +03:00
if ( ! bridge )
return 0 ;
status = acpi_evaluate_integer ( bridge - > handle , METHOD_NAME__UID , NULL ,
& uid ) ;
if ( status ! = AE_OK ) {
dev_err ( host , " unable to retrieve _UID of %s \n " ,
dev_name ( match ) ) ;
return - ENODEV ;
}
2021-10-29 22:51:48 +03:00
ctx = ( struct cxl_chbs_context ) {
2021-10-29 22:51:53 +03:00
. dev = host ,
2021-10-29 22:51:48 +03:00
. uid = uid ,
} ;
acpi_table_parse_cedt ( ACPI_CEDT_TYPE_CHBS , cxl_get_chbcr , & ctx ) ;
if ( ctx . chbcr = = 0 ) {
2021-10-08 00:34:26 +03:00
dev_warn ( host , " No CHBS found for Host Bridge: %s \n " ,
dev_name ( match ) ) ;
return 0 ;
}
2021-06-18 02:12:15 +03:00
2021-10-29 22:51:48 +03:00
rc = cxl_add_dport ( root_port , match , uid , ctx . chbcr ) ;
2021-06-09 19:01:46 +03:00
if ( rc ) {
dev_err ( host , " failed to add downstream port: %s \n " ,
dev_name ( match ) ) ;
return rc ;
}
dev_dbg ( host , " add dport%llu: %s \n " , uid , dev_name ( match ) ) ;
return 0 ;
}
2021-06-16 02:18:17 +03:00
static int add_root_nvdimm_bridge ( struct device * match , void * data )
{
struct cxl_decoder * cxld ;
struct cxl_port * root_port = data ;
struct cxl_nvdimm_bridge * cxl_nvb ;
struct device * host = root_port - > dev . parent ;
if ( ! is_root_decoder ( match ) )
return 0 ;
cxld = to_cxl_decoder ( match ) ;
if ( ! ( cxld - > flags & CXL_DECODER_F_PMEM ) )
return 0 ;
cxl_nvb = devm_cxl_add_nvdimm_bridge ( host , root_port ) ;
if ( IS_ERR ( cxl_nvb ) ) {
dev_dbg ( host , " failed to register pmem \n " ) ;
return PTR_ERR ( cxl_nvb ) ;
}
dev_dbg ( host , " %s: add: %s \n " , dev_name ( & root_port - > dev ) ,
dev_name ( & cxl_nvb - > dev ) ) ;
return 1 ;
}
2021-06-09 19:01:35 +03:00
static int cxl_acpi_probe ( struct platform_device * pdev )
{
2021-06-09 19:01:51 +03:00
int rc ;
2021-06-09 19:01:35 +03:00
struct cxl_port * root_port ;
struct device * host = & pdev - > dev ;
2021-06-09 19:01:46 +03:00
struct acpi_device * adev = ACPI_COMPANION ( host ) ;
2021-10-29 22:51:48 +03:00
struct cxl_cfmws_context ctx ;
2021-06-09 19:01:35 +03:00
root_port = devm_cxl_add_port ( host , host , CXL_RESOURCE_NONE , NULL ) ;
if ( IS_ERR ( root_port ) )
return PTR_ERR ( root_port ) ;
dev_dbg ( host , " add: %s \n " , dev_name ( & root_port - > dev ) ) ;
2021-06-09 19:01:51 +03:00
rc = bus_for_each_dev ( adev - > dev . bus , NULL , root_port ,
add_host_bridge_dport ) ;
2021-10-29 22:51:48 +03:00
if ( rc < 0 )
return rc ;
2021-06-09 19:01:51 +03:00
2021-10-29 22:51:48 +03:00
ctx = ( struct cxl_cfmws_context ) {
. dev = host ,
. root_port = root_port ,
} ;
acpi_table_parse_cedt ( ACPI_CEDT_TYPE_CFMWS , cxl_parse_cfmws , & ctx ) ;
2021-06-18 02:12:16 +03:00
2021-06-09 19:01:51 +03:00
/*
* Root level scanned with host - bridge as dports , now scan host - bridges
* for their role as CXL uports to their CXL - capable PCIe Root Ports .
*/
2021-06-16 02:18:17 +03:00
rc = bus_for_each_dev ( adev - > dev . bus , NULL , root_port ,
add_host_bridge_uport ) ;
2021-10-29 22:51:48 +03:00
if ( rc < 0 )
return rc ;
2021-06-16 02:18:17 +03:00
if ( IS_ENABLED ( CONFIG_CXL_PMEM ) )
rc = device_for_each_child ( & root_port - > dev , root_port ,
add_root_nvdimm_bridge ) ;
if ( rc < 0 )
return rc ;
2021-10-29 22:51:48 +03:00
2021-06-16 02:18:17 +03:00
return 0 ;
2021-06-09 19:01:35 +03:00
}
/* Match the ACPI0017 CXL root object; the mangled " ACPI0017 " literal
 * (injected spaces) would never match a real _HID. */
static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
};

module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);