drm/dp_mst: Fix flushing the delayed port/mstb destroy work

At the moment, a delayed destroy work item still pending at module
removal time will be canceled, leaving behind the MST ports and branch
devices (mstbs) it was meant to destroy. Fix this by queuing the work
on a dedicated workqueue: destroying that workqueue drains it,
including any work items that were requeued in the meantime.

v2:
- Check if wq is NULL before calling destroy_workqueue().

Cc: Lyude Paul <lyude@redhat.com>
Cc: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200610134704.25270-1-imre.deak@intel.com
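To illustrate the failure mode being fixed: the delayed destroy work can
requeue itself when destroying one object exposes further ports/mstbs to
destroy, and cancel_work_sync() discards such a pending instance instead
of running it. A minimal sketch with hypothetical demo_* names (not taken
from the driver):

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical demo_* names, not taken from the driver. */
static struct workqueue_struct *demo_wq;
static struct work_struct demo_destroy_work;
static bool demo_have_more;	/* destroying objects exposed more to destroy */

static void demo_destroy_handler(struct work_struct *work)
{
	/* ... destroy one batch of ports/mstbs ... */
	if (demo_have_more)
		queue_work(demo_wq, work);	/* the work requeues itself */
}

static int demo_init(void)
{
	demo_wq = alloc_ordered_workqueue("demo_wq", 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_destroy_work, demo_destroy_handler);
	return 0;
}

static void demo_exit(void)
{
	/*
	 * cancel_work_sync(&demo_destroy_work) would discard a pending
	 * (re)queued instance instead of running it, leaking the objects;
	 * flush_work() would run one instance but not follow a requeue.
	 * Draining the dedicated WQ runs everything, requeues included.
	 */
	destroy_workqueue(demo_wq);
}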
Commit: 72822c3bfa (parent: d8bd15b37d)
Author: Imre Deak, 2020-06-10 16:47:04 +03:00
2 changed files with 24 additions and 3 deletions

drivers/gpu/drm/drm_dp_mst_topology.c

@@ -1630,7 +1630,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 	mutex_lock(&mgr->delayed_destroy_lock);
 	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
 	mutex_unlock(&mgr->delayed_destroy_lock);
-	schedule_work(&mgr->delayed_destroy_work);
+	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
 }
 
 /**
@@ -1747,7 +1747,7 @@ static void drm_dp_destroy_port(struct kref *kref)
 	mutex_lock(&mgr->delayed_destroy_lock);
 	list_add(&port->next, &mgr->destroy_port_list);
 	mutex_unlock(&mgr->delayed_destroy_lock);
-	schedule_work(&mgr->delayed_destroy_work);
+	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
 }
 
 /**
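Both call sites previously used schedule_work(), which targets the shared
system_wq; a driver cannot drain the system workqueue of just its own
items. For reference, schedule_work() is simply a wrapper (see
include/linux/workqueue.h):

static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}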
@@ -5203,6 +5203,15 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	INIT_LIST_HEAD(&mgr->destroy_port_list);
 	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
 	INIT_LIST_HEAD(&mgr->up_req_list);
+
+	/*
+	 * delayed_destroy_work will be queued on a dedicated WQ, so that any
+	 * requeuing will be also flushed when deiniting the topology manager.
+	 */
+	mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
+	if (mgr->delayed_destroy_wq == NULL)
+		return -ENOMEM;
+
 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
 	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
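alloc_ordered_workqueue() returns an unbound workqueue limited to one
in-flight work item, so the destroy work runs serialized, in queueing
order. At the time of this patch the helper expanded roughly as follows
(the exact internal flag set varies by kernel version, so treat this as
a sketch):

#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)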
@@ -5247,7 +5256,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
 	drm_dp_mst_topology_mgr_set_mst(mgr, false);
 	flush_work(&mgr->work);
-	cancel_work_sync(&mgr->delayed_destroy_work);
+	/* The following will also drain any requeued work on the WQ. */
+	if (mgr->delayed_destroy_wq) {
+		destroy_workqueue(mgr->delayed_destroy_wq);
+		mgr->delayed_destroy_wq = NULL;
+	}
 	mutex_lock(&mgr->payload_lock);
 	kfree(mgr->payloads);
 	mgr->payloads = NULL;
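destroy_workqueue() drains the queue before freeing it: flushing is
repeated until the queue stays empty, and while draining only work items
already running on that workqueue may requeue themselves, so chained
destroy work is executed rather than dropped. A sketch of the resulting
teardown pattern (demo names; the NULL check mirrors the v2 change,
presumably guarding a manager whose init failed before the WQ was
allocated):

/* Demo names; mirrors the patch's teardown pattern, not the driver code. */
static void demo_teardown(struct workqueue_struct **wq)
{
	if (!*wq)
		return;		/* e.g. init failed before allocating the WQ */

	/*
	 * destroy_workqueue() drains first: flushing repeats until the
	 * queue is empty, so destroy work that requeued itself still runs.
	 */
	destroy_workqueue(*wq);
	*wq = NULL;		/* also guards against a double destroy */
}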

include/drm/drm_dp_mst_helper.h

@@ -681,6 +681,14 @@ struct drm_dp_mst_topology_mgr {
 	 * @destroy_branch_device_list.
 	 */
 	struct mutex delayed_destroy_lock;
+
+	/**
+	 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
+	 * A dedicated WQ makes it possible to drain any requeued work items
+	 * on it.
+	 */
+	struct workqueue_struct *delayed_destroy_wq;
+
 	/**
 	 * @delayed_destroy_work: Work item to destroy MST port and branch
 	 * devices, needed to avoid locking inversion.