// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core pci
 *
 * Compute Express Link protocols are layered on top of PCIe. CXL core provides
 * a set of helpers for CXL interactions which occur via PCIe.
 */

static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");

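/*
 * A minimal usage sketch, not part of this file: because the parameter is
 * created with mode 0644 it can be set at load time or changed at runtime
 * via sysfs. The "cxl_core" module name below is an assumption for
 * illustration.
 *
 *	modprobe cxl_core media_ready_timeout=120
 *	echo 120 > /sys/module/cxl_core/parameters/media_ready_timeout
 */
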
struct cxl_walk_context {
	struct pci_bus *bus;
	struct cxl_port *port;
	int type;
	int error;
	int count;
};

static int match_add_dports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct cxl_register_map map;
	struct cxl_dport *dport;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != ctx->bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != ctx->type)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return 0;

	rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_dbg(&port->dev, "failed to find component registers\n");

	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	dport = devm_cxl_add_dport(port, &pdev->dev, port_num,
				   cxl_regmap_to_base(pdev, &map));
	if (IS_ERR(dport)) {
		ctx->error = PTR_ERR(dport);
		return PTR_ERR(dport);
	}
	ctx->count++;

	dev_dbg(&port->dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));

	return 0;
}

/**
 * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
 * @port: cxl_port whose ->uport is the upstream of dports to be enumerated
 *
 * Returns a positive number of dports enumerated or a negative error
 * code.
 */
int devm_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct pci_bus *bus = cxl_port_to_pci_bus(port);
	struct cxl_walk_context ctx;
	int type;

	if (!bus)
		return -ENXIO;

	if (pci_is_root_bus(bus))
		type = PCI_EXP_TYPE_ROOT_PORT;
	else
		type = PCI_EXP_TYPE_DOWNSTREAM;

	ctx = (struct cxl_walk_context) {
		.port = port,
		.bus = bus,
		.type = type,
	};
	pci_walk_bus(bus, match_add_dports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	return ctx.count;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);

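/*
 * A minimal usage sketch, not part of this file: a port driver's probe path
 * might call devm_cxl_port_enumerate_dports() and treat only negative
 * returns as fatal, since a positive return is the number of dports found.
 * The function name example_enumerate() is hypothetical.
 *
 *	static int example_enumerate(struct cxl_port *port)
 *	{
 *		int nr_dports = devm_cxl_port_enumerate_dports(port);
 *
 *		if (nr_dports < 0)
 *			return nr_dports;
 *		dev_dbg(&port->dev, "found %d dports\n", nr_dports);
 *		return 0;
 *	}
 */
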
/*
 * Wait up to @media_ready_timeout for the device to report memory
 * active.
 */
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool active = false;
	u64 md_status;
	int rc, i;

	for (i = media_ready_timeout; i; i--) {
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
		if (rc)
			return rc;

		active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
		if (active)
			break;
		msleep(1000);
	}

	if (!active) {
		dev_err(&pdev->dev,
			"timeout awaiting memory active after %d seconds\n",
			media_ready_timeout);
		return -ETIMEDOUT;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);

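/*
 * A minimal usage sketch, assuming a memdev driver probe path that has
 * already mapped the memdev registers (cxlds->regs.memdev) and located the
 * CXL DVSEC (cxlds->cxl_dvsec); example_probe_media() is hypothetical.
 *
 *	static int example_probe_media(struct cxl_dev_state *cxlds)
 *	{
 *		int rc = cxl_await_media_ready(cxlds);
 *
 *		if (rc)
 *			dev_warn(cxlds->dev, "media not active (%d)\n", rc);
 *		return rc;
 *	}
 */
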
static int wait_for_valid(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec, rc;
	u32 val;

	/*
	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
	 * and Size Low registers are valid. Must be set within 1 second of
	 * deassertion of reset to CXL device. Likely it is already set by the
	 * time this runs, but otherwise give a 1.5 second timeout in case of
	 * clock skew.
	 */
	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	msleep(1500);

	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	return -ETIMEDOUT;
}

/*
 * Returns 0 on success and a negative error code on failure. The count of
 * non-zero ranges found is reported via info->ranges; the cxl_mem driver
 * depends on info->ranges == 0 to init HDM operation.
 */
int cxl_dvsec_ranges(struct cxl_dev_state *cxlds,
		     struct cxl_endpoint_dvsec_info *info)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int hdm_count, rc, i, ranges = 0;
	struct device *dev = &pdev->dev;
	int d = cxlds->cxl_dvsec;
	u16 cap, ctrl;

	if (!d) {
		dev_dbg(dev, "No DVSEC Capability\n");
		return -ENXIO;
	}

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc)
		return rc;

	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
		dev_dbg(dev, "Not MEM Capable\n");
		return -ENXIO;
	}

	/*
	 * It is not allowed by spec for MEM.capable to be set and have 0 legacy
	 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
	 * driver is for a spec defined class code which must be CXL.mem
	 * capable, there is no point in continuing to enable CXL.mem.
	 */
	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	rc = wait_for_valid(cxlds);
	if (rc) {
		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
		return rc;
	}

	info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
	if (!info->mem_enabled)
		return 0;

	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;

		info->dvsec_range[i] = (struct range) {
			.start = base,
			.end = base + size - 1
		};

		if (size)
			ranges++;
	}

	info->ranges = ranges;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dvsec_ranges, CXL);
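
/*
 * A minimal usage sketch, not part of this file: a caller such as an
 * endpoint driver's probe path might gate HDM decoder setup on whether any
 * active DVSEC ranges were found; example_check_ranges() is hypothetical.
 *
 *	static int example_check_ranges(struct cxl_dev_state *cxlds,
 *					struct cxl_endpoint_dvsec_info *info)
 *	{
 *		int rc = cxl_dvsec_ranges(cxlds, info);
 *
 *		if (rc)
 *			return rc;
 *		if (info->ranges)
 *			dev_dbg(cxlds->dev, "%d active DVSEC range(s)\n",
 *				info->ranges);
 *		return 0;
 *	}
 */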