// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include "cxlpci.h"
#include "cxl.h"

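/*
 * Map the window restrictions declared in a CFMWS entry to the
 * corresponding CXL decoder flags.
 */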
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
        unsigned long flags = CXL_DECODER_F_ENABLE;

        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
                flags |= CXL_DECODER_F_TYPE2;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
                flags |= CXL_DECODER_F_TYPE3;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
                flags |= CXL_DECODER_F_RAM;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
                flags |= CXL_DECODER_F_PMEM;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
                flags |= CXL_DECODER_F_LOCK;

        return flags;
}
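
/*
 * Sanity check a CFMWS entry before consuming it: only modulo interleave
 * arithmetic is supported, the base HPA and window size must be 256MB
 * aligned, and the table length must cover the advertised number of
 * interleave targets.
 */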
static int cxl_acpi_cfmws_verify(struct device *dev,
                                 struct acpi_cedt_cfmws *cfmws)
{
        int rc, expected_len;
        unsigned int ways;

        if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
                dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
                dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
                dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
                return -EINVAL;
        }

        rc = cxl_to_ways(cfmws->interleave_ways, &ways);
        if (rc) {
                dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
                        cfmws->interleave_ways);
                return -EINVAL;
        }

        expected_len = struct_size(cfmws, interleave_targets, ways);

        if (cfmws->header.length < expected_len) {
                dev_err(dev, "CFMWS length %d less than expected %d\n",
                        cfmws->header.length, expected_len);
                return -EINVAL;
        }

        if (cfmws->header.length > expected_len)
                dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
                        cfmws->header.length, expected_len);

        return 0;
}

struct cxl_cfmws_context {
        struct device *dev;
        struct cxl_port *root_port;
        struct resource *cxl_res;
        int id;
};

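/*
 * Callback for acpi_table_parse_cedt(): stake out a resource for each
 * CXL fixed memory window and register a root decoder to cover it.
 * Malformed entries are logged and skipped (return 0) so that the
 * remaining windows are still enumerated.
 */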
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
                           const unsigned long end)
{
        int target_map[CXL_DECODER_MAX_INTERLEAVE];
        struct cxl_cfmws_context *ctx = arg;
        struct cxl_port *root_port = ctx->root_port;
        struct resource *cxl_res = ctx->cxl_res;
        struct cxl_root_decoder *cxlrd;
        struct device *dev = ctx->dev;
        struct acpi_cedt_cfmws *cfmws;
        struct cxl_decoder *cxld;
        unsigned int ways, i, ig;
        struct resource *res;
        int rc;

        cfmws = (struct acpi_cedt_cfmws *) header;

        rc = cxl_acpi_cfmws_verify(dev, cfmws);
        if (rc) {
                dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
                        cfmws->base_hpa,
                        cfmws->base_hpa + cfmws->window_size - 1);
                return 0;
        }

        rc = cxl_to_ways(cfmws->interleave_ways, &ways);
        if (rc)
                return rc;
        rc = cxl_to_granularity(cfmws->granularity, &ig);
        if (rc)
                return rc;
        for (i = 0; i < ways; i++)
                target_map[i] = cfmws->interleave_targets[i];

        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
        if (!res->name)
                goto err_name;

        res->start = cfmws->base_hpa;
        res->end = cfmws->base_hpa + cfmws->window_size - 1;
        res->flags = IORESOURCE_MEM;

        /* add to the local resource tracking to establish a sort order */
        rc = insert_resource(cxl_res, res);
        if (rc)
                goto err_insert;

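        /*
         * The window resource is now tracked in cxl_res (and torn down
         * by remove_cxl_resources()), so a decoder allocation failure
         * only skips this window rather than failing the CEDT walk.
         */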
        cxlrd = cxl_root_decoder_alloc(root_port, ways);
        if (IS_ERR(cxlrd))
                return 0;

        cxld = &cxlrd->cxlsd.cxld;
        cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
        cxld->target_type = CXL_DECODER_EXPANDER;
        cxld->hpa_range = (struct range) {
                .start = res->start,
                .end = res->end,
        };
        cxld->interleave_ways = ways;
        /*
         * Minimize the x1 granularity to advertise support for any
         * valid region granularity
         */
        if (ways == 1)
                ig = CXL_DECODER_MIN_GRANULARITY;
        cxld->interleave_granularity = ig;

        rc = cxl_decoder_add(cxld, target_map);
        if (rc)
                put_device(&cxld->dev);
        else
                rc = cxl_decoder_autoremove(dev, cxld);
        if (rc) {
                dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n",
                        cxld->hpa_range.start, cxld->hpa_range.end);
                return 0;
        }
        dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
                dev_name(&cxld->dev),
                phys_to_target_node(cxld->hpa_range.start),
                cxld->hpa_range.start, cxld->hpa_range.end);

        return 0;

err_insert:
        kfree(res->name);
err_name:
        kfree(res);
        return -ENOMEM;
}
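
/*
 * Identify an ACPI0016 (CXL host bridge) device. Marked __mock so the
 * cxl_test infrastructure can substitute its own lookup when emulating
 * topologies.
 */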
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
                                              struct device *dev)
{
        struct acpi_device *adev = to_acpi_device(dev);

        if (!acpi_pci_find_root(adev->handle))
                return NULL;

        if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
                return adev;
        return NULL;
}

/*
 * A host bridge is a dport to a CFMWS decode and it is a uport to the
 * dport (PCIe Root Ports) in the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
        struct cxl_port *root_port = arg;
        struct device *host = root_port->dev.parent;
        struct acpi_device *bridge = to_cxl_host_bridge(host, match);
        struct acpi_pci_root *pci_root;
        struct cxl_dport *dport;
        struct cxl_port *port;
        int rc;

        if (!bridge)
                return 0;

        dport = cxl_find_dport_by_dev(root_port, match);
        if (!dport) {
                dev_dbg(host, "host bridge expected and not found\n");
                return 0;
        }

        /*
         * Note that this lookup already succeeded in
         * to_cxl_host_bridge(), so no need to check for failure here
         */
        pci_root = acpi_pci_find_root(bridge->handle);
        rc = devm_cxl_register_pci_bus(host, match, pci_root->bus);
        if (rc)
                return rc;

        port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport);
        if (IS_ERR(port))
                return PTR_ERR(port);
        dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));

        return 0;
}

struct cxl_chbs_context {
        struct device *dev;
        unsigned long long uid;
        resource_size_t chbcr;
};

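/*
 * CEDT sub-table callback: match a CHBS entry against the host bridge
 * _UID recorded in the context and note its component register base.
 */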
static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
                         const unsigned long end)
{
        struct cxl_chbs_context *ctx = arg;
        struct acpi_cedt_chbs *chbs;

        if (ctx->chbcr)
                return 0;

        chbs = (struct acpi_cedt_chbs *) header;

        if (ctx->uid != chbs->uid)
                return 0;
        ctx->chbcr = chbs->base;

        return 0;
}
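
/*
 * Add each ACPI0016 host bridge as a dport of the CXL root port, using
 * the bridge's _UID to find its CHBS entry (and thus its component
 * register base) in the CEDT.
 */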
static int add_host_bridge_dport(struct device *match, void *arg)
{
        acpi_status status;
        unsigned long long uid;
        struct cxl_dport *dport;
        struct cxl_chbs_context ctx;
        struct cxl_port *root_port = arg;
        struct device *host = root_port->dev.parent;
        struct acpi_device *bridge = to_cxl_host_bridge(host, match);

        if (!bridge)
                return 0;

        status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL,
                                       &uid);
        if (status != AE_OK) {
                dev_err(host, "unable to retrieve _UID of %s\n",
                        dev_name(match));
                return -ENODEV;
        }

        ctx = (struct cxl_chbs_context) {
                .dev = host,
                .uid = uid,
        };
        acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx);

        if (ctx.chbcr == 0) {
                dev_warn(host, "No CHBS found for Host Bridge: %s\n",
                         dev_name(match));
                return 0;
        }

        dport = devm_cxl_add_dport(root_port, match, uid, ctx.chbcr);
        if (IS_ERR(dport)) {
                dev_err(host, "failed to add downstream port: %s\n",
                        dev_name(match));
                return PTR_ERR(dport);
        }
        dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match));
        return 0;
}
static int add_root_nvdimm_bridge(struct device *match, void *data)
{
        struct cxl_decoder *cxld;
        struct cxl_port *root_port = data;
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct device *host = root_port->dev.parent;

        if (!is_root_decoder(match))
                return 0;

        cxld = to_cxl_decoder(match);
        if (!(cxld->flags & CXL_DECODER_F_PMEM))
                return 0;

        cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
        if (IS_ERR(cxl_nvb)) {
                dev_dbg(host, "failed to register pmem\n");
                return PTR_ERR(cxl_nvb);
        }
        dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
                dev_name(&cxl_nvb->dev));
        return 1;
}
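
/*
 * Give the ACPI0017 root device its own lock class so lockdep can
 * validate nested device_lock() usage across the CXL port hierarchy;
 * the paired devm action restores the default class before the device
 * is destroyed.
 */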
static struct lock_class_key cxl_root_key;

static void cxl_acpi_lock_reset_class(void *dev)
{
        device_lock_reset_class(dev);
}

static void del_cxl_resource(struct resource *res)
{
        kfree(res->name);
        kfree(res);
}

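/*
 * Each window in the private cxl_res tree is paired with the copy that
 * was published in iomem_resource; the otherwise unused 'desc' field of
 * the private node stashes the pointer to its public counterpart.
 */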
static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
        priv->desc = (unsigned long) pub;
}

static struct resource *cxl_get_public_resource(struct resource *priv)
{
        return (struct resource *) priv->desc;
}
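
/*
 * Teardown pairs with add_cxl_resources(): delete each private window
 * along with the public iomem_resource copy recorded for it.
 */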
static void remove_cxl_resources(void *data)
{
        struct resource *res, *next, *cxl = data;

        for (res = cxl->child; res; res = next) {
                struct resource *victim = cxl_get_public_resource(res);

                next = res->sibling;
                remove_resource(res);

                if (victim) {
                        remove_resource(victim);
                        kfree(victim);
                }

                del_cxl_resource(res);
        }
}

/**
 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
 * @cxl_res: A standalone resource tree where each CXL window is a sibling
 *
 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with another window
 * entry and require the window to be truncated or trimmed. Consider this
 * situation:
 *
 * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
 * |--------------- "System RAM" -------------|
 *
 * ...where platform firmware has established a System RAM resource across 2
 * windows, but has left some portion of window 1 for dynamic CXL region
 * provisioning. In this case "Window 0" will span the entirety of the "System
 * RAM" span, and "CXL Window 1" is truncated to the remaining tail past the end
 * of that "System RAM" resource.
 */
static int add_cxl_resources(struct resource *cxl_res)
{
        struct resource *res, *new, *next;

        for (res = cxl_res->child; res; res = next) {
                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (!new)
                        return -ENOMEM;
                new->name = res->name;
                new->start = res->start;
                new->end = res->end;
                new->flags = IORESOURCE_MEM;
                new->desc = IORES_DESC_CXL;

                /*
                 * Record the public resource in the private cxl_res tree for
                 * later removal.
                 */
                cxl_set_public_resource(res, new);

                insert_resource_expand_to_fit(&iomem_resource, new);

                next = res->sibling;
                while (next && resource_overlaps(new, next)) {
                        if (resource_contains(new, next)) {
                                struct resource *_next = next->sibling;

                                remove_resource(next);
                                del_cxl_resource(next);
                                next = _next;
                        } else
                                next->start = new->end + 1;
                }
        }
        return 0;
}
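
/*
 * Match each root decoder to the "CXL Window" resource that contains
 * its HPA range, so later region provisioning can allocate from it.
 */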
static int pair_cxl_resource(struct device *dev, void *data)
{
        struct resource *cxl_res = data;
        struct resource *p;

        if (!is_root_decoder(dev))
                return 0;

        for (p = cxl_res->child; p; p = p->sibling) {
                struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
                struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
                struct resource res = {
                        .start = cxld->hpa_range.start,
                        .end = cxld->hpa_range.end,
                        .flags = IORESOURCE_MEM,
                };

                if (resource_contains(p, &res)) {
                        cxlrd->res = cxl_get_public_resource(p);
                        break;
                }
        }

        return 0;
}
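
/*
 * Probe order: add the ACPI0017 device as the CXL root port, enumerate
 * host bridge dports from the CEDT CHBS entries, carve out the fixed
 * memory windows from the CFMWS entries, publish them in
 * iomem_resource, and then descend into the host bridges as uports.
 */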
static int cxl_acpi_probe(struct platform_device *pdev)
{
        int rc;
        struct resource *cxl_res;
        struct cxl_port *root_port;
        struct device *host = &pdev->dev;
        struct acpi_device *adev = ACPI_COMPANION(host);
        struct cxl_cfmws_context ctx;

        device_lock_set_class(&pdev->dev, &cxl_root_key);
        rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
                                      &pdev->dev);
        if (rc)
                return rc;

        cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
        if (!cxl_res)
                return -ENOMEM;
        cxl_res->name = "CXL mem";
        cxl_res->start = 0;
        cxl_res->end = -1;
        cxl_res->flags = IORESOURCE_MEM;

        root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
        if (IS_ERR(root_port))
                return PTR_ERR(root_port);
        dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));

        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_dport);
        if (rc < 0)
                return rc;

        rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
        if (rc)
                return rc;

        ctx = (struct cxl_cfmws_context) {
                .dev = host,
                .root_port = root_port,
                .cxl_res = cxl_res,
        };
        rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
        if (rc < 0)
                return -ENXIO;

        rc = add_cxl_resources(cxl_res);
        if (rc)
                return rc;

        /*
         * Populate the root decoders with their related iomem resource,
         * if present
         */
        device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

        /*
         * Root level scanned with host-bridge as dports, now scan host-bridges
         * for their role as CXL uports to their CXL-capable PCIe Root Ports.
         */
        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_uport);
        if (rc < 0)
                return rc;

        if (IS_ENABLED(CONFIG_CXL_PMEM))
                rc = device_for_each_child(&root_port->dev, root_port,
                                           add_root_nvdimm_bridge);
        if (rc < 0)
                return rc;

        /* In case PCI is scanned before ACPI re-trigger memdev attach */
        return cxl_bus_rescan();
}

static const struct acpi_device_id cxl_acpi_ids[] = {
        { "ACPI0017" },
        { },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

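/*
 * The platform device id table lets this driver also bind to the
 * "cxl_acpi" platform device created by the cxl_test unit test harness.
 */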
static const struct platform_device_id cxl_test_ids[] = {
        { "cxl_acpi" },
        { },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
        .probe = cxl_acpi_probe,
        .driver = {
                .name = KBUILD_MODNAME,
                .acpi_match_table = cxl_acpi_ids,
        },
        .id_table = cxl_test_ids,
};

module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);