USB: EHCI: unlink one async QH at a time
This patch (as1648) fixes a regression affecting nVidia EHCI controllers. Evidently they don't like to have more than one async QH unlinked at a time. I can't imagine how they manage to mess it up, but at least one of them does.

The patch changes the async unlink logic in two ways:

Each time an IAA cycle is started, only the first QH on the async unlink list is handled (rather than all of them).

Async QHs do not all get unlinked as soon as they have been empty for long enough. Instead, only the last one (i.e., the one that has been on the schedule the longest) is unlinked, and then only if no other unlinks are in progress at the time.

This means that when multiple QHs are empty, they won't be unlinked as quickly as before. That's okay; it won't affect correct operation of the driver or add an excessive load. Multiple unlinks tend to be relatively rare in any case.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Reported-and-tested-by: Piergiorgio Sartor <piergiorgio.sartor@nexgo.de>
Cc: stable <stable@vger.kernel.org> # 3.6
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
55bcdce8a8
commit
6e0c3339a6
@@ -1197,17 +1197,26 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
 	if (ehci->async_iaa || ehci->async_unlinking)
 		return;
 
-	/* Do all the waiting QHs at once */
-	ehci->async_iaa = ehci->async_unlink;
-	ehci->async_unlink = NULL;
-
 	/* If the controller isn't running, we don't have to wait for it */
 	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
+
+		/* Do all the waiting QHs */
+		ehci->async_iaa = ehci->async_unlink;
+		ehci->async_unlink = NULL;
+
 		if (!nested)		/* Avoid recursion */
 			end_unlink_async(ehci);
 
 	/* Otherwise start a new IAA cycle */
 	} else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
+		struct ehci_qh	*qh;
+
+		/* Do only the first waiting QH (nVidia bug?) */
+		qh = ehci->async_unlink;
+		ehci->async_iaa = qh;
+		ehci->async_unlink = qh->unlink_next;
+		qh->unlink_next = NULL;
 
 		/* Make sure the unlinks are all visible to the hardware */
 		wmb();
@@ -1255,34 +1264,35 @@ static void end_unlink_async(struct ehci_hcd *ehci)
 	}
 }
 
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
+
 static void unlink_empty_async(struct ehci_hcd *ehci)
 {
-	struct ehci_qh		*qh, *next;
-	bool			stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+	struct ehci_qh		*qh;
+	struct ehci_qh		*qh_to_unlink = NULL;
 	bool			check_unlinks_later = false;
+	int			count = 0;
 
-	/* Unlink all the async QHs that have been empty for a timer cycle */
-	next = ehci->async->qh_next.qh;
-	while (next) {
-		qh = next;
-		next = qh->qh_next.qh;
-
+	/* Find the last async QH which has been empty for a timer cycle */
+	for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
 		if (list_empty(&qh->qtd_list) &&
 				qh->qh_state == QH_STATE_LINKED) {
-			if (!stopped && qh->unlink_cycle ==
-					ehci->async_unlink_cycle)
+			++count;
+			if (qh->unlink_cycle == ehci->async_unlink_cycle)
 				check_unlinks_later = true;
 			else
-				single_unlink_async(ehci, qh);
+				qh_to_unlink = qh;
 		}
 	}
 
-	/* Start a new IAA cycle if any QHs are waiting for it */
-	if (ehci->async_unlink)
-		start_iaa_cycle(ehci, false);
+	/* If nothing else is being unlinked, unlink the last empty QH */
+	if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) {
+		start_unlink_async(ehci, qh_to_unlink);
+		--count;
+	}
 
-	/* QHs that haven't been empty for long enough will be handled later */
-	if (check_unlinks_later) {
+	/* Other QHs will be handled later */
+	if (count > 0) {
 		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
 		++ehci->async_unlink_cycle;
 	}
|
Loading…
Reference in New Issue
Block a user