// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <stddef.h>
#include "defines.h"

/*
 * Data buffer spanning two pages that will be placed first in the .data
 * segment. Even if not used internally, the second page is needed by the
 * external test that manipulates page permissions.
 */
static uint8_t encl_buffer[8192] = { 1 };

enum sgx_enclu_function {
	EACCEPT = 0x5,
	EMODPE = 0x6,
};

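/*
 * The helpers below invoke ENCLU by emitting its opcode bytes (0f 01 d7)
 * directly, avoiding any assembler dependency. The leaf function is
 * selected via EAX; for EACCEPT and EMODPE, RBX holds the address of a
 * SECINFO structure and RCX the linear address of the EPC page being
 * operated on.
 */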
static void do_encl_emodpe(void *_op)
{
	struct sgx_secinfo secinfo __aligned(sizeof(struct sgx_secinfo)) = {0};
	struct encl_op_emodpe *op = _op;

	secinfo.flags = op->flags;

	asm volatile(".byte 0x0f, 0x01, 0xd7"
			:
			: "a" (EMODPE),
			  "b" (&secinfo),
			  "c" (op->epc_addr));
}

static void do_encl_eaccept(void *_op)
{
	struct sgx_secinfo secinfo __aligned(sizeof(struct sgx_secinfo)) = {0};
	struct encl_op_eaccept *op = _op;
	int rax;

	secinfo.flags = op->flags;

	asm volatile(".byte 0x0f, 0x01, 0xd7"
			: "=a" (rax)
			: "a" (EACCEPT),
			  "b" (&secinfo),
			  "c" (op->epc_addr));

	op->ret = rax;
}

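/*
 * The enclave is built without the C library, so provide the minimal
 * memcpy() and memset() implementations it needs.
 */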
static void *memcpy(void *dest, const void *src, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		((char *)dest)[i] = ((char *)src)[i];

	return dest;
}

static void *memset(void *dest, int c, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		((char *)dest)[i] = c;

	return dest;
}

static void do_encl_init_tcs_page(void *_op)
{
	struct encl_op_init_tcs_page *op = _op;
	void *tcs = (void *)op->tcs_page;
	uint32_t val_32;

	memset(tcs, 0, 16);			/* STATE and FLAGS */
	memcpy(tcs + 16, &op->ssa, 8);		/* OSSA */
	memset(tcs + 24, 0, 4);			/* CSSA */
	val_32 = 1;
	memcpy(tcs + 28, &val_32, 4);		/* NSSA */
	memcpy(tcs + 32, &op->entry, 8);	/* OENTRY */
	memset(tcs + 40, 0, 24);		/* AEP, OFSBASE, OGSBASE */
	val_32 = 0xFFFFFFFF;
	memcpy(tcs + 64, &val_32, 4);		/* FSLIMIT */
	memcpy(tcs + 68, &val_32, 4);		/* GSLIMIT */
	memset(tcs + 72, 0, 4024);		/* Reserved */
}

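/*
 * Simple operations requested by the test runner: copy a 64-bit value
 * to or from the static encl_buffer, to or from a caller-supplied
 * enclave address, or do nothing at all.
 */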
static void do_encl_op_put_to_buf(void *op)
{
	struct encl_op_put_to_buf *op2 = op;

	memcpy(&encl_buffer[0], &op2->value, 8);
}

static void do_encl_op_get_from_buf(void *op)
{
	struct encl_op_get_from_buf *op2 = op;

	memcpy(&op2->value, &encl_buffer[0], 8);
}

static void do_encl_op_put_to_addr(void *_op)
{
	struct encl_op_put_to_addr *op = _op;

	memcpy((void *)op->addr, &op->value, 8);
}

static void do_encl_op_get_from_addr(void *_op)
{
	struct encl_op_get_from_addr *op = _op;

	memcpy(&op->value, (void *)op->addr, 8);
}

static void do_encl_op_nop(void *_op)
{
}
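
/*
 * Dispatch an operation requested by the test runner: RDI points to a
 * struct encl_op_header, and op->type indexes the handler table below.
 */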
void encl_body(void *rdi, void *rsi)
{
	const void (*encl_op_array[ENCL_OP_MAX])(void *) = {
		do_encl_op_put_to_buf,
		do_encl_op_get_from_buf,
		do_encl_op_put_to_addr,
		do_encl_op_get_from_addr,
		do_encl_op_nop,
		do_encl_eaccept,
		do_encl_emodpe,
		do_encl_init_tcs_page,
	};

	struct encl_op_header *op = (struct encl_op_header *)rdi;

	if (op->type < ENCL_OP_MAX)
		(*encl_op_array[op->type])(op);
}