@@ -48,7 +48,6 @@
 struct cppc_pcc_data {
         struct mbox_chan *pcc_channel;
         void __iomem *pcc_comm_addr;
-        int pcc_subspace_idx;
         bool pcc_channel_acquired;
         ktime_t deadline;
         unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
@@ -75,13 +74,16 @@ struct cppc_pcc_data {
 
         /* Wait queue for CPUs whose requests were batched */
         wait_queue_head_t pcc_write_wait_q;
+        ktime_t last_cmd_cmpl_time;
+        ktime_t last_mpar_reset;
+        int mpar_count;
+        int refcount;
 };
 
-/* Structure to represent the single PCC channel */
-static struct cppc_pcc_data pcc_data = {
-        .pcc_subspace_idx = -1,
-        .platform_owns_pcc = true,
-};
+/* Array to represent the PCC channel per subspace id */
+static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
+/* The cpu_pcc_subspace_idx contains per CPU subspace id */
+static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
 
 /*
  * The cpc_desc structure contains the ACPI register details
@@ -93,7 +95,8 @@ static struct cppc_pcc_data pcc_data = {
 static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
 
 /* pcc mapped address + header size + offset within PCC subspace */
-#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs))
+#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
+                                                0x8 + (offs))
 
 /* Check if a CPC register is in PCC */
 #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&            \
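With the reworked macro, looking up a PCC register is a two-step resolution: CPU -> per-CPU subspace id -> per-subspace mapped region. A minimal sketch of how the pieces tie together (the helper itself is hypothetical; GET_PCC_VADDR and cpu_pcc_subspace_idx come from this patch):

        static void __iomem *cpc_pcc_vaddr(int cpu, u64 offs)
        {
                /* subspace id recorded for this CPU during probe */
                int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);

                /* 0x8 skips the acpi_pcct_shared_memory header */
                return GET_PCC_VADDR(offs, pcc_ss_id);
        }

This is exactly the pattern cpc_read()/cpc_write() follow in the later hunks.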
@@ -188,13 +191,16 @@ static struct kobj_type cppc_ktype = {
         .default_attrs = cppc_attrs,
 };
 
-static int check_pcc_chan(bool chk_err_bit)
+static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
 {
         int ret = -EIO, status = 0;
-        struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr;
-        ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline);
+        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+        struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+                pcc_ss_data->pcc_comm_addr;
+        ktime_t next_deadline = ktime_add(ktime_get(),
+                                          pcc_ss_data->deadline);
 
-        if (!pcc_data.platform_owns_pcc)
+        if (!pcc_ss_data->platform_owns_pcc)
                 return 0;
 
         /* Retry in case the remote processor was too slow to catch up. */
@@ -219,7 +225,7 @@ static int check_pcc_chan(bool chk_err_bit)
         }
 
         if (likely(!ret))
-                pcc_data.platform_owns_pcc = false;
+                pcc_ss_data->platform_owns_pcc = false;
         else
                 pr_err("PCC check channel failed. Status=%x\n", status);
 
@@ -230,13 +236,12 @@ static int check_pcc_chan(bool chk_err_bit)
  * This function transfers the ownership of the PCC to the platform
  * So it must be called while holding write_lock(pcc_lock)
  */
-static int send_pcc_cmd(u16 cmd)
+static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
 {
         int ret = -EIO, i;
+        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
         struct acpi_pcct_shared_memory *generic_comm_base =
-                (struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr;
-        static ktime_t last_cmd_cmpl_time, last_mpar_reset;
-        static int mpar_count;
+                (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
         unsigned int time_delta;
 
         /*
@@ -249,24 +254,25 @@ static int send_pcc_cmd(u16 cmd)
                  * before write completion, so first send a WRITE command to
                  * platform
                  */
-                if (pcc_data.pending_pcc_write_cmd)
-                        send_pcc_cmd(CMD_WRITE);
+                if (pcc_ss_data->pending_pcc_write_cmd)
+                        send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 
-                ret = check_pcc_chan(false);
+                ret = check_pcc_chan(pcc_ss_id, false);
                 if (ret)
                         goto end;
         } else /* CMD_WRITE */
-                pcc_data.pending_pcc_write_cmd = FALSE;
+                pcc_ss_data->pending_pcc_write_cmd = FALSE;
 
         /*
          * Handle the Minimum Request Turnaround Time (MRTT)
          * "The minimum amount of time that OSPM must wait after the completion
          * of a command before issuing the next command, in microseconds"
          */
-        if (pcc_data.pcc_mrtt) {
-                time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
-                if (pcc_data.pcc_mrtt > time_delta)
-                        udelay(pcc_data.pcc_mrtt - time_delta);
+        if (pcc_ss_data->pcc_mrtt) {
+                time_delta = ktime_us_delta(ktime_get(),
+                                            pcc_ss_data->last_cmd_cmpl_time);
+                if (pcc_ss_data->pcc_mrtt > time_delta)
+                        udelay(pcc_ss_data->pcc_mrtt - time_delta);
         }
 
         /*
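The MRTT hunk above enforces the ACPI minimum-turnaround rule: if fewer than pcc_mrtt microseconds have elapsed since the last command completed, spin for the remainder before ringing the doorbell again. Pulled out as a stand-alone sketch (hypothetical helper; the fields are the per-subspace ones this patch introduces):

        static void pcc_mrtt_delay(struct cppc_pcc_data *pcc_ss_data)
        {
                unsigned int elapsed_us;

                if (!pcc_ss_data->pcc_mrtt)
                        return; /* platform advertised no MRTT */

                elapsed_us = ktime_us_delta(ktime_get(),
                                            pcc_ss_data->last_cmd_cmpl_time);
                if (pcc_ss_data->pcc_mrtt > elapsed_us)
                        udelay(pcc_ss_data->pcc_mrtt - elapsed_us);
        }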
@@ -280,18 +286,19 @@ static int send_pcc_cmd(u16 cmd)
          * not send the request to the platform after hitting the MPAR limit in
          * any 60s window
          */
-        if (pcc_data.pcc_mpar) {
-                if (mpar_count == 0) {
-                        time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
-                        if (time_delta < 60 * MSEC_PER_SEC) {
+        if (pcc_ss_data->pcc_mpar) {
+                if (pcc_ss_data->mpar_count == 0) {
+                        time_delta = ktime_ms_delta(ktime_get(),
+                                                    pcc_ss_data->last_mpar_reset);
+                        if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
                                 pr_debug("PCC cmd not sent due to MPAR limit");
                                 ret = -EIO;
                                 goto end;
                         }
-                        last_mpar_reset = ktime_get();
-                        mpar_count = pcc_data.pcc_mpar;
+                        pcc_ss_data->last_mpar_reset = ktime_get();
+                        pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
                 }
-                mpar_count--;
+                pcc_ss_data->mpar_count--;
         }
 
         /* Write to the shared comm region. */
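The MPAR accounting above is effectively a token bucket: up to pcc_mpar commands per rolling 60 s window, with mpar_count as the remaining budget. The same logic as a hypothetical stand-alone helper, returning false when the command must be dropped:

        static bool pcc_mpar_allow(struct cppc_pcc_data *pcc_ss_data)
        {
                if (!pcc_ss_data->pcc_mpar)
                        return true; /* no MPAR limit advertised */

                if (pcc_ss_data->mpar_count == 0) {
                        s64 window_ms = ktime_ms_delta(ktime_get(),
                                                       pcc_ss_data->last_mpar_reset);

                        /* budget exhausted inside the current window */
                        if (window_ms < 60 * MSEC_PER_SEC &&
                            pcc_ss_data->last_mpar_reset)
                                return false;

                        /* window expired: refill the budget */
                        pcc_ss_data->last_mpar_reset = ktime_get();
                        pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
                }
                pcc_ss_data->mpar_count--;
                return true;
        }

Note the added last_mpar_reset test in the new code: a freshly kzalloc()ed pcc_ss_data has last_mpar_reset == 0, and without the test the delta alone could wrongly trip the limit within the first minute after boot.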
@@ -300,10 +307,10 @@ static int send_pcc_cmd(u16 cmd)
         /* Flip CMD COMPLETE bit */
         writew_relaxed(0, &generic_comm_base->status);
 
-        pcc_data.platform_owns_pcc = true;
+        pcc_ss_data->platform_owns_pcc = true;
 
         /* Ring doorbell */
-        ret = mbox_send_message(pcc_data.pcc_channel, &cmd);
+        ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
         if (ret < 0) {
                 pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
                                 cmd, ret);
@@ -311,15 +318,15 @@ static int send_pcc_cmd(u16 cmd)
         }
 
         /* wait for completion and check for PCC error bit */
-        ret = check_pcc_chan(true);
+        ret = check_pcc_chan(pcc_ss_id, true);
 
-        if (pcc_data.pcc_mrtt)
-                last_cmd_cmpl_time = ktime_get();
+        if (pcc_ss_data->pcc_mrtt)
+                pcc_ss_data->last_cmd_cmpl_time = ktime_get();
 
-        if (pcc_data.pcc_channel->mbox->txdone_irq)
-                mbox_chan_txdone(pcc_data.pcc_channel, ret);
+        if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
+                mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
         else
-                mbox_client_txdone(pcc_data.pcc_channel, ret);
+                mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
 
 end:
         if (cmd == CMD_WRITE) {
@@ -329,12 +336,12 @@ end:
                                 if (!desc)
                                         continue;
 
-                                if (desc->write_cmd_id == pcc_data.pcc_write_cnt)
+                                if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
                                         desc->write_cmd_status = ret;
                         }
                 }
-                pcc_data.pcc_write_cnt++;
-                wake_up_all(&pcc_data.pcc_write_wait_q);
+                pcc_ss_data->pcc_write_cnt++;
+                wake_up_all(&pcc_ss_data->pcc_write_wait_q);
         }
 
         return ret;
@@ -536,16 +543,16 @@ err_ret:
 }
 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 
-static int register_pcc_channel(int pcc_subspace_idx)
+static int register_pcc_channel(int pcc_ss_idx)
 {
         struct acpi_pcct_hw_reduced *cppc_ss;
         u64 usecs_lat;
 
-        if (pcc_subspace_idx >= 0) {
-                pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
-                                pcc_subspace_idx);
+        if (pcc_ss_idx >= 0) {
+                pcc_data[pcc_ss_idx]->pcc_channel =
+                        pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
 
-                if (IS_ERR(pcc_data.pcc_channel)) {
+                if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
                         pr_err("Failed to find PCC communication channel\n");
                         return -ENODEV;
                 }
@@ -556,7 +563,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
          * PCC channels) and stored pointers to the
          * subspace communication region in con_priv.
          */
-        cppc_ss = (pcc_data.pcc_channel)->con_priv;
+        cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
 
         if (!cppc_ss) {
                 pr_err("No PCC subspace found for CPPC\n");
@@ -569,19 +576,20 @@ static int register_pcc_channel(int pcc_subspace_idx)
          * So add an arbitrary amount of wait on top of Nominal.
          */
         usecs_lat = NUM_RETRIES * cppc_ss->latency;
-        pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
-        pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time;
-        pcc_data.pcc_mpar = cppc_ss->max_access_rate;
-        pcc_data.pcc_nominal = cppc_ss->latency;
+        pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+        pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
+        pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
+        pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
 
-        pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
-        if (!pcc_data.pcc_comm_addr) {
+        pcc_data[pcc_ss_idx]->pcc_comm_addr =
+                acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
+        if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
                 pr_err("Failed to ioremap PCC comm region mem\n");
                 return -ENOMEM;
         }
 
         /* Set flag so that we don't come here for each CPU. */
-        pcc_data.pcc_channel_acquired = true;
+        pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
         }
 
         return 0;
@@ -600,6 +608,34 @@ bool __weak cpc_ffh_supported(void)
         return false;
 }
 
+
+/**
+ * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
+ *
+ * Check and allocate the cppc_pcc_data memory.
+ * In some processor configurations it is possible that the same subspace
+ * is shared between multiple CPUs. This is seen especially in CPUs
+ * with hardware multi-threading support.
+ *
+ * Return: 0 for success, errno for failure
+ */
+int pcc_data_alloc(int pcc_ss_id)
+{
+        if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
+                return -EINVAL;
+
+        if (pcc_data[pcc_ss_id]) {
+                pcc_data[pcc_ss_id]->refcount++;
+        } else {
+                pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
+                                              GFP_KERNEL);
+                if (!pcc_data[pcc_ss_id])
+                        return -ENOMEM;
+                pcc_data[pcc_ss_id]->refcount++;
+        }
+
+        return 0;
+}
 /*
  * An example CPC table looks like the following.
  *
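pcc_data_alloc() makes subspace descriptors shareable with a plain refcount. A hypothetical call sequence for two SMT siblings that report the same subspace id:

        pcc_data_alloc(0);      /* first CPU: kzalloc()s pcc_data[0], refcount = 1 */
        pcc_data_alloc(0);      /* sibling CPU: reuses pcc_data[0], refcount = 2 */
        /* acpi_cppc_processor_exit() decrements and kfree()s at refcount == 0 */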
@@ -661,6 +697,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
         struct device *cpu_dev;
         acpi_handle handle = pr->handle;
         unsigned int num_ent, i, cpc_rev;
+        int pcc_subspace_id = -1;
         acpi_status status;
         int ret = -EFAULT;
 
@@ -733,9 +770,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                          * so extract it only once.
                          */
                        if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
-                                if (pcc_data.pcc_subspace_idx < 0)
-                                        pcc_data.pcc_subspace_idx = gas_t->access_width;
-                                else if (pcc_data.pcc_subspace_idx != gas_t->access_width) {
+                                if (pcc_subspace_id < 0) {
+                                        pcc_subspace_id = gas_t->access_width;
+                                        if (pcc_data_alloc(pcc_subspace_id))
+                                                goto out_free;
+                                } else if (pcc_subspace_id != gas_t->access_width) {
                                         pr_debug("Mismatched PCC ids.\n");
                                         goto out_free;
                                 }
@@ -763,6 +802,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                         goto out_free;
                 }
         }
+        per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
         /* Store CPU Logical ID */
         cpc_ptr->cpu_id = pr->id;
 
@@ -771,14 +811,14 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
         if (ret)
                 goto out_free;
 
-        /* Register PCC channel once for all CPUs. */
-        if (!pcc_data.pcc_channel_acquired) {
-                ret = register_pcc_channel(pcc_data.pcc_subspace_idx);
+        /* Register PCC channel once for each PCC subspace id. */
+        if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
+                ret = register_pcc_channel(pcc_subspace_id);
                 if (ret)
                         goto out_free;
 
-                init_rwsem(&pcc_data.pcc_lock);
-                init_waitqueue_head(&pcc_data.pcc_write_wait_q);
+                init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
+                init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
         }
 
         /* Everything looks okay */
@@ -831,6 +871,18 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
         struct cpc_desc *cpc_ptr;
         unsigned int i;
         void __iomem *addr;
+        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
+
+        if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
+                if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
+                        pcc_data[pcc_ss_id]->refcount--;
+                        if (!pcc_data[pcc_ss_id]->refcount) {
+                                pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
+                                pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
+                                kfree(pcc_data[pcc_ss_id]);
+                        }
+                }
+        }
 
         cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
         if (!cpc_ptr)
@@ -888,6 +940,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 {
         int ret_val = 0;
         void __iomem *vaddr = 0;
+        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
         struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 
         if (reg_res->type == ACPI_TYPE_INTEGER) {
@@ -897,7 +950,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 
         *val = 0;
         if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
-                vaddr = GET_PCC_VADDR(reg->address);
+                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
         else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                 vaddr = reg_res->sys_mem_vaddr;
         else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
@@ -932,10 +985,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 {
         int ret_val = 0;
         void __iomem *vaddr = 0;
+        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
         struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 
         if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
-                vaddr = GET_PCC_VADDR(reg->address);
+                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
         else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                 vaddr = reg_res->sys_mem_vaddr;
         else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
@@ -980,6 +1034,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
         struct cpc_register_resource *highest_reg, *lowest_reg,
                 *lowest_non_linear_reg, *nominal_reg;
         u64 high, low, nom, min_nonlinear;
+        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
         int ret = 0, regs_in_pcc = 0;
 
         if (!cpc_desc) {
@@ -996,9 +1052,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
         if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
                 CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
                 regs_in_pcc = 1;
-                down_write(&pcc_data.pcc_lock);
+                down_write(&pcc_ss_data->pcc_lock);
                 /* Ring doorbell once to update PCC subspace */
-                if (send_pcc_cmd(CMD_READ) < 0) {
+                if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
                         ret = -EIO;
                         goto out_err;
                 }
@@ -1021,7 +1077,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 
 out_err:
         if (regs_in_pcc)
-                up_write(&pcc_data.pcc_lock);
+                up_write(&pcc_ss_data->pcc_lock);
         return ret;
 }
 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
@@ -1038,6 +1094,8 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
         struct cpc_register_resource *delivered_reg, *reference_reg,
                 *ref_perf_reg, *ctr_wrap_reg;
+        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
         u64 delivered, reference, ref_perf, ctr_wrap_time;
         int ret = 0, regs_in_pcc = 0;
 
@@ -1061,10 +1119,10 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
         /* Are any of the regs PCC? */
         if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
                 CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
-                down_write(&pcc_data.pcc_lock);
+                down_write(&pcc_ss_data->pcc_lock);
                 regs_in_pcc = 1;
                 /* Ring doorbell once to update PCC subspace */
-                if (send_pcc_cmd(CMD_READ) < 0) {
+                if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
                         ret = -EIO;
                         goto out_err;
                 }
@@ -1094,7 +1152,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
         perf_fb_ctrs->wraparound_time = ctr_wrap_time;
 out_err:
         if (regs_in_pcc)
-                up_write(&pcc_data.pcc_lock);
+                up_write(&pcc_ss_data->pcc_lock);
         return ret;
 }
 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
@@ -1110,6 +1168,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 {
         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
         struct cpc_register_resource *desired_reg;
+        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
         int ret = 0;
 
         if (!cpc_desc) {
@@ -1127,11 +1187,11 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
          * achieve that goal here
          */
         if (CPC_IN_PCC(desired_reg)) {
-                down_read(&pcc_data.pcc_lock); /* BEGIN Phase-I */
-                if (pcc_data.platform_owns_pcc) {
-                        ret = check_pcc_chan(false);
+                down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
+                if (pcc_ss_data->platform_owns_pcc) {
+                        ret = check_pcc_chan(pcc_ss_id, false);
                         if (ret) {
-                                up_read(&pcc_data.pcc_lock);
+                                up_read(&pcc_ss_data->pcc_lock);
                                 return ret;
                         }
                 }
@@ -1139,8 +1199,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
                  * Update the pending_write to make sure a PCC CMD_READ will not
                  * arrive and steal the channel during the switch to write lock
                  */
-                pcc_data.pending_pcc_write_cmd = true;
-                cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt;
+                pcc_ss_data->pending_pcc_write_cmd = true;
+                cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
                 cpc_desc->write_cmd_status = 0;
         }
 
@@ -1151,7 +1211,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
         cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
 
         if (CPC_IN_PCC(desired_reg))
-                up_read(&pcc_data.pcc_lock);    /* END Phase-I */
+                up_read(&pcc_ss_data->pcc_lock);        /* END Phase-I */
         /*
          * This is Phase-II where we transfer the ownership of PCC to Platform
          *
@@ -1199,15 +1259,15 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
          * the write command before servicing the read command
          */
         if (CPC_IN_PCC(desired_reg)) {
-                if (down_write_trylock(&pcc_data.pcc_lock)) { /* BEGIN Phase-II */
+                if (down_write_trylock(&pcc_ss_data->pcc_lock)) { /* BEGIN Phase-II */
                         /* Update only if there are pending write commands */
-                        if (pcc_data.pending_pcc_write_cmd)
-                                send_pcc_cmd(CMD_WRITE);
-                        up_write(&pcc_data.pcc_lock); /* END Phase-II */
+                        if (pcc_ss_data->pending_pcc_write_cmd)
+                                send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+                        up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */
                 } else
                         /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
-                        wait_event(pcc_data.pcc_write_wait_q,
-                                cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt);
+                        wait_event(pcc_ss_data->pcc_write_wait_q,
+                                   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
 
                 /* send_pcc_cmd updates the status in case of failure */
                 ret = cpc_desc->write_cmd_status;
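Taken together, the cppc_set_perf() hunks convert its two-phase batching protocol to operate per subspace. A condensed restatement of the flow (error paths omitted; all identifiers are from this file):

        /* Phase-I: many CPUs write their desired perf concurrently */
        down_read(&pcc_ss_data->pcc_lock);
        pcc_ss_data->pending_pcc_write_cmd = true;
        cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
        cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
        up_read(&pcc_ss_data->pcc_lock);

        /* Phase-II: one CPU flushes the whole batch with a single doorbell */
        if (down_write_trylock(&pcc_ss_data->pcc_lock)) {
                if (pcc_ss_data->pending_pcc_write_cmd)
                        send_pcc_cmd(pcc_ss_id, CMD_WRITE);
                up_write(&pcc_ss_data->pcc_lock);
        } else {
                /* losers sleep until the winner bumps pcc_write_cnt */
                wait_event(pcc_ss_data->pcc_write_wait_q,
                           cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
        }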
@@ -1240,6 +1300,8 @@ unsigned int cppc_get_transition_latency(int cpu_num)
         unsigned int latency_ns = 0;
         struct cpc_desc *cpc_desc;
         struct cpc_register_resource *desired_reg;
+        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
+        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 
         cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
         if (!cpc_desc)
@@ -1249,11 +1311,11 @@ unsigned int cppc_get_transition_latency(int cpu_num)
         if (!CPC_IN_PCC(desired_reg))
                 return CPUFREQ_ETERNAL;
 
-        if (pcc_data.pcc_mpar)
-                latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar);
+        if (pcc_ss_data->pcc_mpar)
+                latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
 
-        latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000);
-        latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000);
+        latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
+        latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
 
         return latency_ns;
 }