cxl/mem: Consolidate CXL DVSEC Range enumeration in the core
In preparation for fixing the setting of the 'mem_enabled' bit in the CXL DVSEC Control register, move all CXL DVSEC range enumeration into the same source file.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165291688886.1426646.15046138604010482084.stgit@dwillia2-xfh
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit 14d7887407
parent 2e4ba0ec97
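Roughly, the consumer side after this change looks like the sketch below (condensed from the cxl_mem_probe() hunks in this diff; the surrounding probe steps and their error handling are omitted):

	/* Condensed sketch of the new call flow in cxl_mem_probe() */
	struct cxl_endpoint_dvsec_info info = { 0 };
	int rc;

	/* the core now owns DVSEC Range enumeration and fills @info */
	rc = cxl_dvsec_ranges(cxlds, &info);
	if (rc)
		return rc;

	/* enumerated ranges are passed in, not read from cxlds->info */
	if (!cxl_hdm_decode_init(cxlds, &info))
		return -EBUSY;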
@@ -142,3 +142,132 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
 	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
+
+static int wait_for_valid(struct cxl_dev_state *cxlds)
+{
+	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+	int d = cxlds->cxl_dvsec, rc;
+	u32 val;
+
+	/*
+	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
+	 * and Size Low registers are valid. Must be set within 1 second of
+	 * deassertion of reset to CXL device. Likely it is already set by the
+	 * time this runs, but otherwise give a 1.5 second timeout in case of
+	 * clock skew.
+	 */
+	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
+	if (rc)
+		return rc;
+
+	if (val & CXL_DVSEC_MEM_INFO_VALID)
+		return 0;
+
+	msleep(1500);
+
+	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
+	if (rc)
+		return rc;
+
+	if (val & CXL_DVSEC_MEM_INFO_VALID)
+		return 0;
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Return positive number of non-zero ranges on success and a negative
+ * error code on failure. The cxl_mem driver depends on ranges == 0 to
+ * init HDM operation.
+ */
+int cxl_dvsec_ranges(struct cxl_dev_state *cxlds,
+		     struct cxl_endpoint_dvsec_info *info)
+{
+	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+	int hdm_count, rc, i, ranges = 0;
+	struct device *dev = &pdev->dev;
+	int d = cxlds->cxl_dvsec;
+	u16 cap, ctrl;
+
+	if (!d) {
+		dev_dbg(dev, "No DVSEC Capability\n");
+		return -ENXIO;
+	}
+
+	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
+	if (rc)
+		return rc;
+
+	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
+	if (rc)
+		return rc;
+
+	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
+		dev_dbg(dev, "Not MEM Capable\n");
+		return -ENXIO;
+	}
+
+	/*
+	 * It is not allowed by spec for MEM.capable to be set and have 0 legacy
+	 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
+	 * driver is for a spec defined class code which must be CXL.mem
+	 * capable, there is no point in continuing to enable CXL.mem.
+	 */
+	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
+	if (!hdm_count || hdm_count > 2)
+		return -EINVAL;
+
+	rc = wait_for_valid(cxlds);
+	if (rc) {
+		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
+		return rc;
+	}
+
+	info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
+
+	for (i = 0; i < hdm_count; i++) {
+		u64 base, size;
+		u32 temp;
+
+		rc = pci_read_config_dword(
+			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
+		if (rc)
+			return rc;
+
+		size = (u64)temp << 32;
+
+		rc = pci_read_config_dword(
+			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
+		if (rc)
+			return rc;
+
+		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
+
+		rc = pci_read_config_dword(
+			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
+		if (rc)
+			return rc;
+
+		base = (u64)temp << 32;
+
+		rc = pci_read_config_dword(
+			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
+		if (rc)
+			return rc;
+
+		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
+
+		info->dvsec_range[i] = (struct range) {
+			.start = base,
+			.end = base + size - 1
+		};
+
+		if (size)
+			ranges++;
+	}
+
+	info->ranges = ranges;
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dvsec_ranges, CXL);
@@ -222,7 +222,6 @@ struct cxl_dev_state {
 	u64 next_persistent_bytes;
 
 	resource_size_t component_reg_phys;
-	struct cxl_endpoint_dvsec_info info;
 	u64 serial;
 
 	int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
@@ -72,4 +72,8 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
 }
 
 int devm_cxl_port_enumerate_dports(struct cxl_port *port);
+struct cxl_dev_state;
+struct cxl_endpoint_dvsec_info;
+int cxl_dvsec_ranges(struct cxl_dev_state *cxlds,
+		     struct cxl_endpoint_dvsec_info *info);
 #endif /* __CXL_PCI_H__ */
@@ -58,18 +58,15 @@ static int create_endpoint(struct cxl_memdev *cxlmd,
  * decoders, or if it can not be determined if DVSEC Ranges are in use.
  * Otherwise, returns true.
  */
-__mock bool cxl_hdm_decode_init(struct cxl_dev_state *cxlds)
+__mock bool cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
+				struct cxl_endpoint_dvsec_info *info)
 {
-	struct cxl_endpoint_dvsec_info *info = &cxlds->info;
 	struct cxl_register_map map;
 	struct cxl_component_reg_map *cmap = &map.component_map;
 	bool global_enable, retval = false;
 	void __iomem *crb;
 	u32 global_ctrl;
 
-	if (info->ranges < 0)
-		return false;
-
 	/* map hdm decoder */
 	crb = ioremap(cxlds->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
 	if (!crb) {
@@ -125,6 +122,7 @@ static void enable_suspend(void *data)
 static int cxl_mem_probe(struct device *dev)
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_endpoint_dvsec_info info = { 0 };
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct cxl_port *parent_port;
 	int rc;
@@ -165,6 +163,10 @@ unlock:
 	if (rc)
 		return rc;
 
+	rc = cxl_dvsec_ranges(cxlds, &info);
+	if (rc)
+		return rc;
+
 	rc = cxl_await_media_ready(cxlds);
 	if (rc) {
 		dev_err(dev, "Media not active (%d)\n", rc);
@@ -175,7 +177,7 @@ unlock:
 	 * If DVSEC ranges are being used instead of HDM decoder registers there
 	 * is no use in trying to manage those.
 	 */
-	if (!cxl_hdm_decode_init(cxlds)) {
+	if (!cxl_hdm_decode_init(cxlds, &info)) {
 		dev_err(dev,
 			"Legacy range registers configuration prevents HDM operation.\n");
 		return -EBUSY;
@@ -386,139 +386,6 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
 	return rc;
 }
 
-static int wait_for_valid(struct cxl_dev_state *cxlds)
-{
-	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
-	int d = cxlds->cxl_dvsec, rc;
-	u32 val;
-
-	/*
-	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
-	 * and Size Low registers are valid. Must be set within 1 second of
-	 * deassertion of reset to CXL device. Likely it is already set by the
-	 * time this runs, but otherwise give a 1.5 second timeout in case of
-	 * clock skew.
-	 */
-	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
-	if (rc)
-		return rc;
-
-	if (val & CXL_DVSEC_MEM_INFO_VALID)
-		return 0;
-
-	msleep(1500);
-
-	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
-	if (rc)
-		return rc;
-
-	if (val & CXL_DVSEC_MEM_INFO_VALID)
-		return 0;
-
-	return -ETIMEDOUT;
-}
-
-/*
- * Return positive number of non-zero ranges on success and a negative
- * error code on failure. The cxl_mem driver depends on ranges == 0 to
- * init HDM operation.
- */
-static int __cxl_dvsec_ranges(struct cxl_dev_state *cxlds,
-			      struct cxl_endpoint_dvsec_info *info)
-{
-	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
-	int hdm_count, rc, i, ranges = 0;
-	struct device *dev = &pdev->dev;
-	int d = cxlds->cxl_dvsec;
-	u16 cap, ctrl;
-
-	if (!d) {
-		dev_dbg(dev, "No DVSEC Capability\n");
-		return -ENXIO;
-	}
-
-	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
-	if (rc)
-		return rc;
-
-	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
-	if (rc)
-		return rc;
-
-	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
-		dev_dbg(dev, "Not MEM Capable\n");
-		return -ENXIO;
-	}
-
-	/*
-	 * It is not allowed by spec for MEM.capable to be set and have 0 legacy
-	 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
-	 * driver is for a spec defined class code which must be CXL.mem
-	 * capable, there is no point in continuing to enable CXL.mem.
-	 */
-	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
-	if (!hdm_count || hdm_count > 2)
-		return -EINVAL;
-
-	rc = wait_for_valid(cxlds);
-	if (rc) {
-		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
-		return rc;
-	}
-
-	info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
-
-	for (i = 0; i < hdm_count; i++) {
-		u64 base, size;
-		u32 temp;
-
-		rc = pci_read_config_dword(
-			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
-		if (rc)
-			return rc;
-
-		size = (u64)temp << 32;
-
-		rc = pci_read_config_dword(
-			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
-		if (rc)
-			return rc;
-
-		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
-
-		rc = pci_read_config_dword(
-			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
-		if (rc)
-			return rc;
-
-		base = (u64)temp << 32;
-
-		rc = pci_read_config_dword(
-			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
-		if (rc)
-			return rc;
-
-		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
-
-		info->dvsec_range[i] = (struct range) {
-			.start = base,
-			.end = base + size - 1
-		};
-
-		if (size)
-			ranges++;
-	}
-
-	return ranges;
-}
-
-static void cxl_dvsec_ranges(struct cxl_dev_state *cxlds)
-{
-	struct cxl_endpoint_dvsec_info *info = &cxlds->info;
-
-	info->ranges = __cxl_dvsec_ranges(cxlds, info);
-}
-
 static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct cxl_register_map map;
@@ -583,8 +450,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		return rc;
 
-	cxl_dvsec_ranges(cxlds);
-
 	cxlmd = devm_cxl_add_memdev(cxlds);
 	if (IS_ERR(cxlmd))
 		return PTR_ERR(cxlmd);
@@ -9,6 +9,7 @@ ldflags-y += --wrap=devm_cxl_setup_hdm
 ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
 ldflags-y += --wrap=devm_cxl_enumerate_decoders
 ldflags-y += --wrap=cxl_await_media_ready
+ldflags-y += --wrap=cxl_dvsec_ranges
 
 DRIVERS := ../../../drivers
 CXL_SRC := $(DRIVERS)/cxl
@@ -242,14 +242,6 @@ static void label_area_release(void *lsa)
 	vfree(lsa);
 }
 
-static void mock_validate_dvsec_ranges(struct cxl_dev_state *cxlds)
-{
-	struct cxl_endpoint_dvsec_info *info;
-
-	info = &cxlds->info;
-	info->mem_enabled = true;
-}
-
 static int cxl_mock_mem_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -286,8 +278,6 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
 	if (rc)
 		return rc;
 
-	mock_validate_dvsec_ranges(cxlds);
-
 	cxlmd = devm_cxl_add_memdev(cxlds);
 	if (IS_ERR(cxlmd))
 		return PTR_ERR(cxlmd);
@@ -208,6 +208,22 @@ int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
 }
 EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, CXL);
 
+int __wrap_cxl_dvsec_ranges(struct cxl_dev_state *cxlds,
+			    struct cxl_endpoint_dvsec_info *info)
+{
+	int rc = 0, index;
+	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+	if (ops && ops->is_mock_dev(cxlds->dev))
+		info->mem_enabled = 1;
+	else
+		rc = cxl_dvsec_ranges(cxlds, info);
+	put_cxl_mock_ops(index);
+
+	return rc;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dvsec_ranges, CXL);
+
 MODULE_LICENSE("GPL v2");
 MODULE_IMPORT_NS(ACPI);
 MODULE_IMPORT_NS(CXL);