x86, UV: Modularize BAU send and wait

Streamline the large uv_flush_send_and_wait() function by factoring
out a couple of helper functions.

And remove some excess comments.

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNy-0004ay-IH@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Authored by Cliff Wickman on 2010-06-02 16:22:02 -05:00; committed by Ingo Molnar
parent 450a007eeb
commit f6d8a56693
2 changed files with 44 additions and 39 deletions
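
For orientation before the hunks below, here is a condensed sketch of how the retry loop in uv_flush_send_and_wait() reads once the patch is applied. It is not the literal patched source: most of the loop body is elided, and the uv_wait_completion() call with its mmr_offset argument is assumed from the surrounding BAU code rather than shown in this diff.

	/* Sketch only: abbreviated shape of the patched retry loop. */
	do {
		/* wait routine and its leading arguments are assumed, not part of this diff */
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
					right_shift, this_cpu, bcp, smaster, try);
		if (completion_status == FLUSH_RETRY_PLUGGED) {
			destination_plugged(bau_desc, bcp, hmaster, stat);
		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
			destination_timeout(bau_desc, bcp, hmaster, stat);
		}
		/* ...reset-with-IPI escalation and statistics elided... */
	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
		(completion_status == FLUSH_RETRY_TIMEOUT));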


@@ -75,7 +75,6 @@
 #define DESC_STATUS_DESTINATION_TIMEOUT 2
 #define DESC_STATUS_SOURCE_TIMEOUT 3
-#define TIMEOUT_DELAY 10
 /*
  * delay for 'plugged' timeout retries, in microseconds
  */


@@ -484,6 +484,47 @@ static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
 	return 1;
 }
 
+/*
+ * Our retries are blocked by all destination swack resources being
+ * in use, and a timeout is pending. In that case hardware immediately
+ * returns the ERROR that looks like a destination timeout.
+ */
+static void
+destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
+			struct bau_control *hmaster, struct ptc_stats *stat)
+{
+	udelay(bcp->plugged_delay);
+	bcp->plugged_tries++;
+	if (bcp->plugged_tries >= bcp->plugsb4reset) {
+		bcp->plugged_tries = 0;
+		quiesce_local_uvhub(hmaster);
+		spin_lock(&hmaster->queue_lock);
+		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+		spin_unlock(&hmaster->queue_lock);
+		end_uvhub_quiesce(hmaster);
+		bcp->ipi_attempts++;
+		stat->s_resets_plug++;
+	}
+}
+
+static void
+destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
+			struct bau_control *hmaster, struct ptc_stats *stat)
+{
+	hmaster->max_bau_concurrent = 1;
+	bcp->timeout_tries++;
+	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
+		bcp->timeout_tries = 0;
+		quiesce_local_uvhub(hmaster);
+		spin_lock(&hmaster->queue_lock);
+		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+		spin_unlock(&hmaster->queue_lock);
+		end_uvhub_quiesce(hmaster);
+		bcp->ipi_attempts++;
+		stat->s_resets_timeout++;
+	}
+}
+
 /*
  * Completions are taking a very long time due to a congested numalink
  * network.
@@ -518,7 +559,7 @@ disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
  *
  * Send a broadcast and wait for it to complete.
  *
- * The flush_mask contains the cpus the broadcast is to be sent to, plus
+ * The flush_mask contains the cpus the broadcast is to be sent to including
  * cpus that are on the local uvhub.
  *
  * Returns 0 if all flushing represented in the mask was done.
@@ -553,7 +594,6 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 				&hmaster->active_descriptor_count,
 				hmaster->max_bau_concurrent));
 	}
-
 	while (hmaster->uvhub_quiesce)
 		cpu_relax();
@@ -584,40 +624,9 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 				right_shift, this_cpu, bcp, smaster, try);
 		if (completion_status == FLUSH_RETRY_PLUGGED) {
-			/*
-			 * Our retries may be blocked by all destination swack
-			 * resources being consumed, and a timeout pending. In
-			 * that case hardware immediately returns the ERROR
-			 * that looks like a destination timeout.
-			 */
-			udelay(bcp->plugged_delay);
-			bcp->plugged_tries++;
-			if (bcp->plugged_tries >= bcp->plugsb4reset) {
-				bcp->plugged_tries = 0;
-				quiesce_local_uvhub(hmaster);
-				spin_lock(&hmaster->queue_lock);
-				uv_reset_with_ipi(&bau_desc->distribution,
-							this_cpu);
-				spin_unlock(&hmaster->queue_lock);
-				end_uvhub_quiesce(hmaster);
-				bcp->ipi_attempts++;
-				stat->s_resets_plug++;
-			}
+			destination_plugged(bau_desc, bcp, hmaster, stat);
 		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
-			hmaster->max_bau_concurrent = 1;
-			bcp->timeout_tries++;
-			udelay(TIMEOUT_DELAY);
-			if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
-				bcp->timeout_tries = 0;
-				quiesce_local_uvhub(hmaster);
-				spin_lock(&hmaster->queue_lock);
-				uv_reset_with_ipi(&bau_desc->distribution,
-							this_cpu);
-				spin_unlock(&hmaster->queue_lock);
-				end_uvhub_quiesce(hmaster);
-				bcp->ipi_attempts++;
-				stat->s_resets_timeout++;
-			}
+			destination_timeout(bau_desc, bcp, hmaster, stat);
 		}
 		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
 			bcp->ipi_attempts = 0;
@@ -628,10 +637,8 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
 		(completion_status == FLUSH_RETRY_TIMEOUT));
 	time2 = get_cycles();
-
 	bcp->plugged_tries = 0;
 	bcp->timeout_tries = 0;
-
 	if ((completion_status == FLUSH_COMPLETE) &&
 	    (bcp->conseccompletes > bcp->complete_threshold) &&
 	    (hmaster->max_bau_concurrent <
@@ -740,7 +747,6 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	bau_desc = bcp->descriptor_base;
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
 	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
-
 	/* cpu statistics */