x86/sgx: Rename sgx_encl_ewb_cpumask() to sgx_encl_cpumask()
sgx_encl_ewb_cpumask() is no longer unique to the reclaimer where it is used during the EWB ENCLS leaf function when EPC pages are written out to main memory and sgx_encl_ewb_cpumask() is used to learn which CPUs might have executed the enclave to ensure that TLBs are cleared. Upcoming SGX2 enabling will use sgx_encl_ewb_cpumask() during the EMODPR and EMODT ENCLS leaf functions that make changes to enclave pages. The function is needed for the same reason it is used now: to learn which CPUs might have executed the enclave to ensure that TLBs no longer point to the changed pages. Rename sgx_encl_ewb_cpumask() to sgx_encl_cpumask() to reflect the broader usage. Signed-off-by: Reinette Chatre <reinette.chatre@intel.com> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org> Link: https://lkml.kernel.org/r/d4d08c449450a13d8dd3bb6c2b1af03895586d4f.1652137848.git.reinette.chatre@intel.com
This commit is contained in:
parent
7f391752d4
commit
bdaa8799f6
@ -715,7 +715,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
|
||||
}
|
||||
|
||||
/**
|
||||
* sgx_encl_ewb_cpumask() - Query which CPUs might be accessing the enclave
|
||||
* sgx_encl_cpumask() - Query which CPUs might be accessing the enclave
|
||||
* @encl: the enclave
|
||||
*
|
||||
* Some SGX functions require that no cached linear-to-physical address
|
||||
@ -740,7 +740,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
|
||||
* The following flow is used to support SGX functions that require that
|
||||
* no cached linear-to-physical address mappings are present:
|
||||
* 1) Execute ENCLS[ETRACK] to initiate hardware tracking.
|
||||
* 2) Use this function (sgx_encl_ewb_cpumask()) to query which CPUs might be
|
||||
* 2) Use this function (sgx_encl_cpumask()) to query which CPUs might be
|
||||
* accessing the enclave.
|
||||
* 3) Send IPI to identified CPUs, kicking them out of the enclave and
|
||||
* thus flushing all locally cached linear-to-physical address mappings.
|
||||
@ -757,7 +757,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
|
||||
*
|
||||
* Return: cpumask of CPUs that might be accessing @encl
|
||||
*/
|
||||
const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
|
||||
const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl)
|
||||
{
|
||||
cpumask_t *cpumask = &encl->cpumask;
|
||||
struct sgx_encl_mm *encl_mm;
|
||||
|
@ -105,7 +105,7 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
|
||||
|
||||
void sgx_encl_release(struct kref *ref);
|
||||
int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
|
||||
const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl);
|
||||
const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl);
|
||||
int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
|
||||
struct sgx_backing *backing);
|
||||
void sgx_encl_put_backing(struct sgx_backing *backing);
|
||||
|
@ -251,7 +251,7 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
|
||||
* miss cpus that entered the enclave between
|
||||
* generating the mask and incrementing epoch.
|
||||
*/
|
||||
on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
|
||||
on_each_cpu_mask(sgx_encl_cpumask(encl),
|
||||
sgx_ipi_cb, NULL, 1);
|
||||
ret = __sgx_encl_ewb(epc_page, va_slot, backing);
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user