Arm SCMI fixes for v6.8
Merge tag 'scmi-fixes-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/fixes

A few fixes addressing the issues below:

1. A spurious IRQ related to a late reply can get wrongly associated with
   the newly enqueued request, resulting in misinterpretation of the data
   in shared memory. This race condition can be detected by looking at the
   channel status bits, which the platform must set to channel free before
   triggering the completion IRQ. Adding a consistency check to validate
   this condition fixes the issue.
2. Incorrect use of asm-generic/bug.h instead of the generic linux/bug.h
3. xa_store() cannot detect a possible duplicate insertion; use xa_insert()
   instead (a short sketch of the difference follows this message)
4. Fix the SCMI clock protocol version for the v3.2 SCMI specification
5. Incorrect upgrade of the highest supported clock protocol version from
   v2.0 to v3.0

* tag 'scmi-fixes-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  firmware: arm_scmi: Fix the clock protocol supported version
  firmware: arm_scmi: Fix the clock protocol version for v3.2
  firmware: arm_scmi: Use xa_insert() when saving raw queues
  firmware: arm_scmi: Use xa_insert() to store opps
  firmware: arm_scmi: Replace asm-generic/bug.h with linux/bug.h
  firmware: arm_scmi: Check mailbox/SMT channel for consistency

Link: https://lore.kernel.org/r/20240122161640.3551085-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
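On item 3: xa_store() simply overwrites whatever entry already occupies the
index (returning the old pointer), while xa_insert() refuses to and returns
-EBUSY, which is what makes a duplicate insertion detectable. A minimal
kernel-style sketch of the pattern; the helper name and warning text are
illustrative, not taken from the driver:

#include <linux/device.h>
#include <linux/xarray.h>

/*
 * Sketch: store @entry at @index but warn on duplicates instead of
 * silently overwriting an earlier entry, as xa_store() would do.
 */
static int store_unique(struct device *dev, struct xarray *xa,
			unsigned long index, void *entry)
{
	int ret = xa_insert(xa, index, entry, GFP_KERNEL);

	if (ret == -EBUSY)
		dev_warn(dev, "duplicate entry at index %lu\n", index);
	else if (ret)
		dev_warn(dev, "failed to insert at index %lu: %d\n", index, ret);

	return ret;
}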
commit d77b016bc9
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -13,7 +13,7 @@
 #include "notify.h"
 
 /* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION		0x20001
+#define SCMI_PROTOCOL_SUPPORTED_VERSION		0x20000
 
 enum scmi_clock_protocol_cmd {
 	CLOCK_ATTRIBUTES = 0x3,
@@ -954,8 +954,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
 		scmi_clock_describe_rates_get(ph, clkid, clk);
 	}
 
-	if (PROTOCOL_REV_MAJOR(version) >= 0x2 &&
-	    PROTOCOL_REV_MINOR(version) >= 0x1) {
+	if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
 		cinfo->clock_config_set = scmi_clock_config_set_v2;
 		cinfo->clock_config_get = scmi_clock_config_get_v2;
 	} else {
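Both clock.c hunks above hinge on how SCMI encodes protocol versions: a
32-bit word with the major revision in the upper 16 bits and the minor in
the lower 16, so 0x20000 is v2.0, 0x20001 is v2.1 and 0x30000 is v3.0, the
clock protocol version that the new >= 0x3 gate targets for the v3.2
specification. A small illustrative sketch of that packing, with helper
names of my own rather than the driver's PROTOCOL_REV_* macros:

#include <linux/types.h>

/* SCMI packs a protocol version as 0xMMMMmmmm: major.minor */
static inline u16 scmi_version_major(u32 version)
{
	return version >> 16;		/* 0x20001 -> 0x2 */
}

static inline u16 scmi_version_minor(u32 version)
{
	return version & 0xffff;	/* 0x20001 -> 0x1 */
}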
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -314,6 +314,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
 void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
 bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
 		     struct scmi_xfer *xfer);
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
 
 /* declarations for message passing transports */
 struct scmi_msg_payld;
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -45,6 +45,20 @@ static void rx_callback(struct mbox_client *cl, void *m)
 {
 	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
 
+	/*
+	 * An A2P IRQ is NOT valid when received while the platform still has
+	 * the ownership of the channel, because the platform at first releases
+	 * the SMT channel and then sends the completion interrupt.
+	 *
+	 * This addresses a possible race condition in which a spurious IRQ from
+	 * a previous timed-out reply which arrived late could be wrongly
+	 * associated with the next pending transaction.
+	 */
+	if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
+		dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
+		return;
+	}
+
 	scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
 }
 
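The check added above relies on the SMT ordering rule spelled out in the
comment: the platform first marks the shared-memory channel free and only
then raises the completion IRQ, so an interrupt observed while the channel
is still busy can only belong to a stale, timed-out exchange. Reduced to
its essentials, and leaving the real SMT header layout to shmem.c, the
agent-side test looks roughly like this sketch; the flag and function names
are illustrative, not the driver's:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

/* Illustrative: channel status bit 0 set means the platform released it */
#define CHAN_STAT_CHANNEL_FREE	BIT(0)

/*
 * Sketch: a completion IRQ is trustworthy only if the platform has
 * already released the channel; otherwise it is a late, spurious IRQ
 * left over from a previous timed-out transaction.
 */
static bool completion_irq_is_valid(u32 __iomem *channel_status)
{
	return ioread32(channel_status) & CHAN_STAT_CHANNEL_FREE;
}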
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -350,8 +350,8 @@ process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
 }
 
 static inline void
-process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
-			unsigned int loop_idx,
+process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
+			struct scmi_opp *opp, unsigned int loop_idx,
 			const struct scmi_msg_resp_perf_describe_levels_v4 *r)
 {
 	opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
@@ -362,10 +362,23 @@ process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
 	/* Note that PERF v4 reports always five 32-bit words */
 	opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
 	if (dom->level_indexing_mode) {
+		int ret;
+
 		opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
 
-		xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL);
-		xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+		ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
+				GFP_KERNEL);
+		if (ret)
+			dev_warn(dev,
+				 "Failed to add opps_by_idx at %d - ret:%d\n",
+				 opp->level_index, ret);
+
+		ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+		if (ret)
+			dev_warn(dev,
+				 "Failed to add opps_by_lvl at %d - ret:%d\n",
+				 opp->perf, ret);
+
 		hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
 	}
 }
@@ -382,7 +395,7 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
 	if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
 		process_response_opp(opp, st->loop_idx, response);
 	else
-		process_response_opp_v4(p->perf_dom, opp, st->loop_idx,
+		process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
 					response);
 	p->perf_dom->opp_count++;
 
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -1111,7 +1111,6 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
 	int i;
 
 	for (i = 0; i < num_chans; i++) {
-		void *xret;
 		struct scmi_raw_queue *q;
 
 		q = scmi_raw_queue_init(raw);
@@ -1120,13 +1119,12 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
 			goto err_xa;
 		}
 
-		xret = xa_store(&raw->chans_q, channels[i], q,
-				GFP_KERNEL);
-		if (xa_err(xret)) {
+		ret = xa_insert(&raw->chans_q, channels[i], q,
+				GFP_KERNEL);
+		if (ret) {
 			dev_err(dev,
 				"Fail to allocate Raw queue 0x%02X\n",
 				channels[i]);
-			ret = xa_err(xret);
 			goto err_xa;
 		}
 	}
@@ -1322,6 +1320,12 @@ void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
 	dev = raw->handle->dev;
 	q = scmi_raw_queue_select(raw, idx,
 				  SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+	if (!q) {
+		dev_warn(dev,
+			 "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
+			 idx, chan_id);
+		return;
+	}
 
 	/*
 	 * Grab the msg_q_lock upfront to avoid a possible race between
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -10,7 +10,7 @@
 #include <linux/processor.h>
 #include <linux/types.h>
 
-#include <asm-generic/bug.h>
+#include <linux/bug.h>
 
 #include "common.h"
 
@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
 	       (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
 		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
 }
+
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
+{
+	return (ioread32(&shmem->channel_status) &
+		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}