drm/amdkfd: CRIU restore sdma id for queues
When re-creating queues during CRIU restore, restore the queue with the same sdma id value used during CRIU dump.

Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: David Yat Sin <david.yatsin@amd.com>
Signed-off-by: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Commit: 2485c12c98 (parent: 8668dfc30d)
@@ -58,7 +58,7 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
|
||||
struct queue *q);
|
||||
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
|
||||
static int allocate_sdma_queue(struct device_queue_manager *dqm,
|
||||
struct queue *q);
|
||||
struct queue *q, const uint32_t *restore_sdma_id);
|
||||
static void kfd_process_hw_exception(struct work_struct *work);
|
||||
|
||||
static inline
|
||||
@@ -299,7 +299,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
|
||||
|
||||
static int create_queue_nocpsch(struct device_queue_manager *dqm,
|
||||
struct queue *q,
|
||||
struct qcm_process_device *qpd)
|
||||
struct qcm_process_device *qpd,
|
||||
const struct kfd_criu_queue_priv_data *qd)
|
||||
{
|
||||
struct mqd_manager *mqd_mgr;
|
||||
int retval;
|
||||
@@ -339,7 +340,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
|
||||
q->pipe, q->queue);
|
||||
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
|
||||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
|
||||
retval = allocate_sdma_queue(dqm, q);
|
||||
retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
|
||||
if (retval)
|
||||
goto deallocate_vmid;
|
||||
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
|
||||
@@ -1034,7 +1035,7 @@ static void pre_reset(struct device_queue_manager *dqm)
|
||||
}
|
||||
|
||||
static int allocate_sdma_queue(struct device_queue_manager *dqm,
|
||||
struct queue *q)
|
||||
struct queue *q, const uint32_t *restore_sdma_id)
|
||||
{
|
||||
int bit;
|
||||
|
||||
@@ -1044,9 +1045,21 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
bit = __ffs64(dqm->sdma_bitmap);
|
||||
dqm->sdma_bitmap &= ~(1ULL << bit);
|
||||
q->sdma_id = bit;
|
||||
if (restore_sdma_id) {
|
||||
/* Re-use existing sdma_id */
|
||||
if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) {
|
||||
pr_err("SDMA queue already in use\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id);
|
||||
q->sdma_id = *restore_sdma_id;
|
||||
} else {
|
||||
/* Find first available sdma_id */
|
||||
bit = __ffs64(dqm->sdma_bitmap);
|
||||
dqm->sdma_bitmap &= ~(1ULL << bit);
|
||||
q->sdma_id = bit;
|
||||
}
|
||||
|
||||
q->properties.sdma_engine_id = q->sdma_id %
|
||||
kfd_get_num_sdma_engines(dqm->dev);
|
||||
q->properties.sdma_queue_id = q->sdma_id /
|
||||
@@ -1056,9 +1069,19 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
|
||||
pr_err("No more XGMI SDMA queue to allocate\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
bit = __ffs64(dqm->xgmi_sdma_bitmap);
|
||||
dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
|
||||
q->sdma_id = bit;
|
||||
if (restore_sdma_id) {
|
||||
/* Re-use existing sdma_id */
|
||||
if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) {
|
||||
pr_err("SDMA queue already in use\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id);
|
||||
q->sdma_id = *restore_sdma_id;
|
||||
} else {
|
||||
bit = __ffs64(dqm->xgmi_sdma_bitmap);
|
||||
dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
|
||||
q->sdma_id = bit;
|
||||
}
|
||||
/* sdma_engine_id is sdma id including
|
||||
* both PCIe-optimized SDMAs and XGMI-
|
||||
* optimized SDMAs. The calculation below
|
||||
@@ -1288,7 +1311,8 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
|
||||
}
|
||||
|
||||
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
|
||||
struct qcm_process_device *qpd)
|
||||
struct qcm_process_device *qpd,
|
||||
const struct kfd_criu_queue_priv_data *qd)
|
||||
{
|
||||
int retval;
|
||||
struct mqd_manager *mqd_mgr;
|
||||
@@ -1303,7 +1327,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
|
||||
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
|
||||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
|
||||
dqm_lock(dqm);
|
||||
retval = allocate_sdma_queue(dqm, q);
|
||||
retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
|
||||
dqm_unlock(dqm);
|
||||
if (retval)
|
||||
goto out;
|
||||
|
@@ -88,7 +88,8 @@ struct device_process_node {
|
||||
struct device_queue_manager_ops {
|
||||
int (*create_queue)(struct device_queue_manager *dqm,
|
||||
struct queue *q,
|
||||
struct qcm_process_device *qpd);
|
||||
struct qcm_process_device *qpd,
|
||||
const struct kfd_criu_queue_priv_data *qd);
|
||||
|
||||
int (*destroy_queue)(struct device_queue_manager *dqm,
|
||||
struct qcm_process_device *qpd,
|
||||
|
@@ -272,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
|
||||
goto err_create_queue;
|
||||
pqn->q = q;
|
||||
pqn->kq = NULL;
|
||||
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
|
||||
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data);
|
||||
print_queue(q);
|
||||
break;
|
||||
|
||||
@@ -292,7 +292,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
|
||||
goto err_create_queue;
|
||||
pqn->q = q;
|
||||
pqn->kq = NULL;
|
||||
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
|
||||
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data);
|
||||
print_queue(q);
|
||||
break;
|
||||
case KFD_QUEUE_TYPE_DIQ:
|
||||
|
Loading…
x
Reference in New Issue
Block a user