// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "cxlmem.h"
#include "cxlpci.h"
/**
 * DOC: cxl port
 *
 * The port driver enumerates dports via PCI and scans for HDM
 * (Host-managed-Device-Memory) decoder resources via the
 * @component_reg_phys value passed in by the agent that registered the
 * port. All descendant ports of a CXL root port (described by platform
 * firmware) are managed in this driver's context. Each driver instance
 * is responsible for tearing down the driver context of immediate
 * descendant ports. The locking for this is validated by
 * CONFIG_PROVE_CXL_LOCKING.
 *
 * The primary service this driver provides is presenting APIs to other
 * drivers to utilize the decoders, and indicating to userspace (via bind
 * status) the connectivity of the CXL.mem protocol throughout the
 * PCIe topology.
 */
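/*
 * devm release action: adapt schedule_cxl_memdev_detach() to the
 * void * callback signature expected by devm_add_action_or_reset().
 */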
static void schedule_detach(void *cxlmd)
{
	schedule_cxl_memdev_detach(cxlmd);
}
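/*
 * device_for_each_child() callback: opportunistically add an enabled,
 * platform-established (CXL_DECODER_STATE_AUTO) endpoint decoder to a
 * region under the CXL root passed in via @root.
 */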
static int discover_region(struct device *dev, void *root)
{
	struct cxl_endpoint_decoder *cxled;
	int rc;

	if (!is_endpoint_decoder(dev))
		return 0;

	cxled = to_cxl_endpoint_decoder(dev);
	if ((cxled->cxld.flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (cxled->state != CXL_DECODER_STATE_AUTO)
		return 0;

	/*
	 * Region enumeration is opportunistic; if this add-event fails,
	 * continue to the next endpoint decoder.
	 */
	rc = cxl_add_to_region(root, cxled);
	if (rc)
		dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
			cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);

	return 0;
}
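/*
 * Probe a switch port: enumerate the downstream ports, parse any CDAT
 * data, and set up the HDM decoder infrastructure. A port that
 * enumerated exactly one dport but lacks an HDM decoder capability
 * falls back to a passthrough decoder.
 */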
static int cxl_switch_port_probe(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;
	int rc;

	/* Cache the data early to ensure is_visible() works */
	read_cdat_data(port);

	rc = devm_cxl_port_enumerate_dports(port);
	if (rc < 0)
		return rc;

	cxl_switch_parse_cdat(port);

	cxlhdm = devm_cxl_setup_hdm(port, NULL);
	if (!IS_ERR(cxlhdm))
		return devm_cxl_enumerate_decoders(cxlhdm, NULL);

	if (PTR_ERR(cxlhdm) != -ENODEV) {
		dev_err(&port->dev, "Failed to map HDM decoder capability\n");
		return PTR_ERR(cxlhdm);
	}

	if (rc == 1) {
		dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
		return devm_cxl_add_passthrough_decoder(port);
	}

	dev_err(&port->dev, "HDM decoder capability not found\n");
	return -ENXIO;
}
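/*
 * Probe an endpoint port: decode the DVSEC range registers, map the HDM
 * decoder capability, cache and parse CDAT, arm a devm action to detach
 * the memdev on teardown, then enumerate the endpoint decoders and
 * opportunistically assemble regions from any committed decoders.
 */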
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
	struct cxl_endpoint_dvsec_info info = { .port = port };
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_hdm *cxlhdm;
	struct cxl_port *root;
	int rc;

	rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info);
	if (rc < 0)
		return rc;

	cxlhdm = devm_cxl_setup_hdm(port, &info);
	if (IS_ERR(cxlhdm)) {
		if (PTR_ERR(cxlhdm) == -ENODEV)
			dev_err(&port->dev, "HDM decoder registers not found\n");
		return PTR_ERR(cxlhdm);
	}

	/* Cache the data early to ensure is_visible() works */
	read_cdat_data(port);
	cxl_endpoint_parse_cdat(port);

	get_device(&cxlmd->dev);
	rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
	if (rc)
		return rc;

	rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
	if (rc)
		return rc;

	rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
	if (rc)
		return rc;

	/*
	 * This can't fail in practice as CXL root exit unregisters all
	 * descendant ports and that in turn synchronizes with cxl_port_probe()
	 */
	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

	root = &cxl_root->port;

	/*
	 * Now that all endpoint decoders are successfully enumerated, try to
	 * assemble regions from committed decoders
	 */
	device_for_each_child(&port->dev, root, discover_region);

	return 0;
}
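/* Dispatch: endpoint ports and switch ports have distinct probe flows */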
static int cxl_port_probe(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);

	if (is_cxl_endpoint(port))
		return cxl_endpoint_port_probe(port);
	return cxl_switch_port_probe(port);
}
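/*
 * sysfs binary attribute read handler: expose the port's cached CDAT
 * table to userspace. Returns -ENXIO if no CDAT is available, and a
 * zero-length read if no table was cached.
 */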
static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *bin_attr, char *buf,
			 loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_port *port = to_cxl_port(dev);

	if (!port->cdat_available)
		return -ENXIO;

	if (!port->cdat.table)
		return 0;

	return memory_read_from_buffer(buf, count, &offset,
				       port->cdat.table,
				       port->cdat.length);
}
static BIN_ATTR_ADMIN_RO(CDAT, 0);
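/* Only expose the CDAT attribute when a CDAT table is actually present */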
static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
					    struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_port *port = to_cxl_port(dev);

	if ((attr == &bin_attr_CDAT) && port->cdat_available)
		return attr->attr.mode;

	return 0;
}
static struct bin_attribute *cxl_cdat_bin_attributes[] = {
	&bin_attr_CDAT,
	NULL,
};

static struct attribute_group cxl_cdat_attribute_group = {
	.bin_attrs = cxl_cdat_bin_attributes,
	.is_bin_visible = cxl_port_bin_attr_is_visible,
};

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_cdat_attribute_group,
	NULL,
};
static struct cxl_driver cxl_port_driver = {
	.name = "cxl_port",
	.probe = cxl_port_probe,
	.id = CXL_DEVICE_PORT,
	.drv = {
		.dev_groups = cxl_port_attribute_groups,
	},
};

module_cxl_driver(cxl_port_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_PORT);