cxl/hdm: Track next decoder to allocate
The CXL specification enforces that endpoint decoders are committed in hw instance id order. In preparation for adding dynamic DPA allocation, record the hw instance id in endpoint decoders, and enforce allocations to occur in hw instance id order.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165784328827.1758207.9627538529944559954.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
parent
2c8669033f
commit
0c33b39352
@@ -160,6 +160,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
|
||||
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
|
||||
{
|
||||
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
|
||||
struct cxl_port *port = cxled_to_port(cxled);
|
||||
struct cxl_dev_state *cxlds = cxlmd->cxlds;
|
||||
struct resource *res = cxled->dpa_res;
|
||||
resource_size_t skip_start;
|
||||
@@ -173,6 +174,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
|
||||
__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
|
||||
cxled->skip = 0;
|
||||
cxled->dpa_res = NULL;
|
||||
port->hdm_end--;
|
||||
}
|
||||
|
||||
static void cxl_dpa_release(void *cxled)
|
||||
@@ -203,6 +205,18 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (port->hdm_end + 1 != cxled->cxld.id) {
|
||||
/*
|
||||
* Assumes alloc and commit order is always in hardware instance
|
||||
* order per expectations from 8.2.5.12.20 Committing Decoder
|
||||
* Programming that enforce decoder[m] committed before
|
||||
* decoder[m+1] commit start.
|
||||
*/
|
||||
dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
|
||||
cxled->cxld.id, port->id, port->hdm_end + 1);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (skipped) {
|
||||
res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
|
||||
dev_name(&cxled->cxld.dev), 0);
|
||||
@@ -235,6 +249,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
|
||||
cxled->cxld.id, cxled->dpa_res);
|
||||
cxled->mode = CXL_DECODER_MIXED;
|
||||
}
|
||||
port->hdm_end++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -502,6 +502,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
|
||||
|
||||
port->component_reg_phys = component_reg_phys;
|
||||
ida_init(&port->decoder_ida);
|
||||
port->hdm_end = -1;
|
||||
INIT_LIST_HEAD(&port->dports);
|
||||
INIT_LIST_HEAD(&port->endpoints);
|
||||
|
||||
|
@@ -333,6 +333,7 @@ struct cxl_nvdimm {
|
||||
* @dports: cxl_dport instances referenced by decoders
|
||||
* @endpoints: cxl_ep instances, endpoints that are a descendant of this port
|
||||
* @decoder_ida: allocator for decoder ids
|
||||
* @hdm_end: track last allocated HDM decoder instance for allocation ordering
|
||||
* @component_reg_phys: component register capability base address (optional)
|
||||
* @dead: last ep has been removed, force port re-creation
|
||||
* @depth: How deep this port is relative to the root. depth 0 is the root.
|
||||
@@ -347,6 +348,7 @@ struct cxl_port {
|
||||
struct list_head dports;
|
||||
struct list_head endpoints;
|
||||
struct ida decoder_ida;
|
||||
int hdm_end;
|
||||
resource_size_t component_reg_phys;
|
||||
bool dead;
|
||||
unsigned int depth;
|
||||
|
Loading…
Reference in New Issue
Block a user