2022-11-30 22:21:36 +03:00
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/unaligned.h>
#include "cxlmem.h"
#include "cxl.h"
static unsigned long cxl_pmem_get_security_flags ( struct nvdimm * nvdimm ,
enum nvdimm_passphrase_type ptype )
{
struct cxl_nvdimm * cxl_nvd = nvdimm_provider_data ( nvdimm ) ;
struct cxl_memdev * cxlmd = cxl_nvd - > cxlmd ;
2023-06-15 04:30:02 +03:00
struct cxl_memdev_state * mds = to_cxl_memdev_state ( cxlmd - > cxlds ) ;
2022-11-30 22:21:36 +03:00
unsigned long security_flags = 0 ;
2022-12-06 07:22:28 +03:00
struct cxl_get_security_output {
__le32 flags ;
} out ;
2022-12-06 07:22:33 +03:00
struct cxl_mbox_cmd mbox_cmd ;
2022-11-30 22:21:36 +03:00
u32 sec_out ;
int rc ;
2022-12-06 07:22:33 +03:00
mbox_cmd = ( struct cxl_mbox_cmd ) {
. opcode = CXL_MBOX_OP_GET_SECURITY_STATE ,
. size_out = sizeof ( out ) ,
. payload_out = & out ,
} ;
2023-06-15 04:30:02 +03:00
rc = cxl_internal_send_cmd ( mds , & mbox_cmd ) ;
2022-11-30 22:21:36 +03:00
if ( rc < 0 )
return 0 ;
2022-12-06 07:22:28 +03:00
sec_out = le32_to_cpu ( out . flags ) ;
2023-06-12 21:10:33 +03:00
/* cache security state */
2023-06-26 03:16:51 +03:00
mds - > security . state = sec_out ;
2023-06-12 21:10:33 +03:00
2022-11-30 22:21:36 +03:00
if ( ptype = = NVDIMM_MASTER ) {
if ( sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET )
set_bit ( NVDIMM_SECURITY_UNLOCKED , & security_flags ) ;
else
set_bit ( NVDIMM_SECURITY_DISABLED , & security_flags ) ;
if ( sec_out & CXL_PMEM_SEC_STATE_MASTER_PLIMIT )
set_bit ( NVDIMM_SECURITY_FROZEN , & security_flags ) ;
return security_flags ;
}
if ( sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET ) {
if ( sec_out & CXL_PMEM_SEC_STATE_FROZEN | |
sec_out & CXL_PMEM_SEC_STATE_USER_PLIMIT )
set_bit ( NVDIMM_SECURITY_FROZEN , & security_flags ) ;
if ( sec_out & CXL_PMEM_SEC_STATE_LOCKED )
set_bit ( NVDIMM_SECURITY_LOCKED , & security_flags ) ;
else
set_bit ( NVDIMM_SECURITY_UNLOCKED , & security_flags ) ;
} else {
set_bit ( NVDIMM_SECURITY_DISABLED , & security_flags ) ;
}
return security_flags ;
}
2022-11-30 22:21:47 +03:00
static int cxl_pmem_security_change_key ( struct nvdimm * nvdimm ,
const struct nvdimm_key_data * old_data ,
const struct nvdimm_key_data * new_data ,
enum nvdimm_passphrase_type ptype )
{
struct cxl_nvdimm * cxl_nvd = nvdimm_provider_data ( nvdimm ) ;
struct cxl_memdev * cxlmd = cxl_nvd - > cxlmd ;
2023-06-15 04:30:02 +03:00
struct cxl_memdev_state * mds = to_cxl_memdev_state ( cxlmd - > cxlds ) ;
2022-12-06 07:22:33 +03:00
struct cxl_mbox_cmd mbox_cmd ;
2022-11-30 22:21:47 +03:00
struct cxl_set_pass set_pass ;
2022-12-06 07:22:33 +03:00
set_pass = ( struct cxl_set_pass ) {
. type = ptype = = NVDIMM_MASTER ? CXL_PMEM_SEC_PASS_MASTER :
CXL_PMEM_SEC_PASS_USER ,
} ;
2022-11-30 22:21:47 +03:00
memcpy ( set_pass . old_pass , old_data - > data , NVDIMM_PASSPHRASE_LEN ) ;
memcpy ( set_pass . new_pass , new_data - > data , NVDIMM_PASSPHRASE_LEN ) ;
2022-12-06 07:22:33 +03:00
mbox_cmd = ( struct cxl_mbox_cmd ) {
. opcode = CXL_MBOX_OP_SET_PASSPHRASE ,
. size_in = sizeof ( set_pass ) ,
. payload_in = & set_pass ,
} ;
2023-06-15 04:30:02 +03:00
return cxl_internal_send_cmd ( mds , & mbox_cmd ) ;
2022-11-30 22:21:47 +03:00
}
2022-11-30 22:22:44 +03:00
static int __cxl_pmem_security_disable ( struct nvdimm * nvdimm ,
const struct nvdimm_key_data * key_data ,
enum nvdimm_passphrase_type ptype )
2022-11-30 22:21:58 +03:00
{
struct cxl_nvdimm * cxl_nvd = nvdimm_provider_data ( nvdimm ) ;
struct cxl_memdev * cxlmd = cxl_nvd - > cxlmd ;
2023-06-15 04:30:02 +03:00
struct cxl_memdev_state * mds = to_cxl_memdev_state ( cxlmd - > cxlds ) ;
2022-11-30 22:21:58 +03:00
struct cxl_disable_pass dis_pass ;
2022-12-06 07:22:33 +03:00
struct cxl_mbox_cmd mbox_cmd ;
2022-11-30 22:21:58 +03:00
2022-12-06 07:22:33 +03:00
dis_pass = ( struct cxl_disable_pass ) {
. type = ptype = = NVDIMM_MASTER ? CXL_PMEM_SEC_PASS_MASTER :
CXL_PMEM_SEC_PASS_USER ,
} ;
2022-11-30 22:21:58 +03:00
memcpy ( dis_pass . pass , key_data - > data , NVDIMM_PASSPHRASE_LEN ) ;
2022-12-06 07:22:33 +03:00
mbox_cmd = ( struct cxl_mbox_cmd ) {
. opcode = CXL_MBOX_OP_DISABLE_PASSPHRASE ,
. size_in = sizeof ( dis_pass ) ,
. payload_in = & dis_pass ,
} ;
2023-06-15 04:30:02 +03:00
return cxl_internal_send_cmd ( mds , & mbox_cmd ) ;
2022-11-30 22:21:58 +03:00
}
2022-11-30 22:22:44 +03:00
/* Disable the user passphrase (LIBNVDIMM .disable op). */
static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
				     const struct nvdimm_key_data *key_data)
{
	return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_USER);
}
/* Disable the master passphrase (LIBNVDIMM .disable_master op). */
static int cxl_pmem_security_disable_master(struct nvdimm *nvdimm,
					    const struct nvdimm_key_data *key_data)
{
	return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_MASTER);
}
2022-11-30 22:22:10 +03:00
static int cxl_pmem_security_freeze ( struct nvdimm * nvdimm )
{
struct cxl_nvdimm * cxl_nvd = nvdimm_provider_data ( nvdimm ) ;
struct cxl_memdev * cxlmd = cxl_nvd - > cxlmd ;
2023-06-15 04:30:02 +03:00
struct cxl_memdev_state * mds = to_cxl_memdev_state ( cxlmd - > cxlds ) ;
2022-12-06 07:22:33 +03:00
struct cxl_mbox_cmd mbox_cmd = {
. opcode = CXL_MBOX_OP_FREEZE_SECURITY ,
} ;
2022-11-30 22:22:10 +03:00
2023-06-15 04:30:02 +03:00
return cxl_internal_send_cmd ( mds , & mbox_cmd ) ;
2022-11-30 22:22:10 +03:00
}
2022-11-30 22:22:21 +03:00
static int cxl_pmem_security_unlock ( struct nvdimm * nvdimm ,
const struct nvdimm_key_data * key_data )
{
struct cxl_nvdimm * cxl_nvd = nvdimm_provider_data ( nvdimm ) ;
struct cxl_memdev * cxlmd = cxl_nvd - > cxlmd ;
2023-06-15 04:30:02 +03:00
struct cxl_memdev_state * mds = to_cxl_memdev_state ( cxlmd - > cxlds ) ;
2022-11-30 22:22:21 +03:00
u8 pass [ NVDIMM_PASSPHRASE_LEN ] ;
2022-12-06 07:22:33 +03:00
struct cxl_mbox_cmd mbox_cmd ;
2022-11-30 22:22:21 +03:00
int rc ;
memcpy ( pass , key_data - > data , NVDIMM_PASSPHRASE_LEN ) ;
2022-12-06 07:22:33 +03:00
mbox_cmd = ( struct cxl_mbox_cmd ) {
. opcode = CXL_MBOX_OP_UNLOCK ,
. size_in = NVDIMM_PASSPHRASE_LEN ,
. payload_in = pass ,
} ;
2023-06-15 04:30:02 +03:00
rc = cxl_internal_send_cmd ( mds , & mbox_cmd ) ;
2022-11-30 22:22:21 +03:00
if ( rc < 0 )
return rc ;
return 0 ;
}
2022-11-30 22:22:32 +03:00
static int cxl_pmem_security_passphrase_erase ( struct nvdimm * nvdimm ,
const struct nvdimm_key_data * key ,
enum nvdimm_passphrase_type ptype )
{
struct cxl_nvdimm * cxl_nvd = nvdimm_provider_data ( nvdimm ) ;
struct cxl_memdev * cxlmd = cxl_nvd - > cxlmd ;
2023-06-15 04:30:02 +03:00
struct cxl_memdev_state * mds = to_cxl_memdev_state ( cxlmd - > cxlds ) ;
2022-12-06 07:22:33 +03:00
struct cxl_mbox_cmd mbox_cmd ;
2022-11-30 22:22:32 +03:00
struct cxl_pass_erase erase ;
int rc ;
2022-12-06 07:22:33 +03:00
erase = ( struct cxl_pass_erase ) {
. type = ptype = = NVDIMM_MASTER ? CXL_PMEM_SEC_PASS_MASTER :
CXL_PMEM_SEC_PASS_USER ,
} ;
2022-11-30 22:22:32 +03:00
memcpy ( erase . pass , key - > data , NVDIMM_PASSPHRASE_LEN ) ;
2022-12-06 07:22:33 +03:00
mbox_cmd = ( struct cxl_mbox_cmd ) {
. opcode = CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE ,
. size_in = sizeof ( erase ) ,
. payload_in = & erase ,
} ;
2023-06-15 04:30:02 +03:00
rc = cxl_internal_send_cmd ( mds , & mbox_cmd ) ;
2022-11-30 22:22:32 +03:00
if ( rc < 0 )
return rc ;
return 0 ;
}
2022-11-30 22:21:36 +03:00
static const struct nvdimm_security_ops __cxl_security_ops = {
. get_flags = cxl_pmem_get_security_flags ,
2022-11-30 22:21:47 +03:00
. change_key = cxl_pmem_security_change_key ,
2022-11-30 22:21:58 +03:00
. disable = cxl_pmem_security_disable ,
2022-11-30 22:22:10 +03:00
. freeze = cxl_pmem_security_freeze ,
2022-11-30 22:22:21 +03:00
. unlock = cxl_pmem_security_unlock ,
2022-11-30 22:22:32 +03:00
. erase = cxl_pmem_security_passphrase_erase ,
2022-11-30 22:22:44 +03:00
. disable_master = cxl_pmem_security_disable_master ,
2022-11-30 22:21:36 +03:00
} ;
const struct nvdimm_security_ops * cxl_security_ops = & __cxl_security_ops ;