// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

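/*
 * Index sequence numbers are 2-bit cyclic values: 1 -> 2 -> 3 -> 1, with
 * 0 reserved as "invalid".  best_seq() returns whichever of the two
 * masked values is newer in that cycle; e.g. given 3 and 1 it picks 1,
 * since nd_inc_seq(3) == 1.
 */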
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

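/*
 * Upper bound on label slots: start from the optimistic count
 * (config_size / label_size), size the index block needed to track that
 * many slots, then recompute how many labels fit beside two such index
 * blocks.  For example, with a (hypothetical) 128KB label area and
 * 256-byte labels: tmp_nslot = 512, the index header plus a 64-byte
 * free bitmap rounds up to one 256-byte block, leaving
 * (131072 - 2 * 256) / 256 = 510 slots.
 */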
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is
	 * large enough to hold 2 index blocks and 2 labels.  The minimum
	 * index block size is 256 bytes.  The label size is 128 for
	 * namespaces prior to version 1.2 and at minimum 256 for version
	 * 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * The on-media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

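		/*
		 * The Fletcher64 checksum is computed over the index
		 * block with its checksum field zeroed, so temporarily
		 * clear the field, recompute, and restore it before
		 * comparing.
		 */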
		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

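/*
 * Labels are stored contiguously after the two index blocks, so a
 * label's slot number and its address are interconvertible by simple
 * pointer arithmetic relative to nd_label_base().
 */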
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
				slot, sum);
			return false;
		}
	}

	return true;
}

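/*
 * Walk the active (non-free) slots in the current index and reserve
 * each valid label's DPA range in the dimm's resource tracking, so that
 * subsequent allocations do not collide with existing namespaces.
 */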
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		if (test_bit(NDD_NOBLK, &nvdimm->flags))
			flags &= ~NSLABEL_FLAG_LOCAL;
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory.  To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
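	/*
	 * For example (hypothetical sizes): with config_size = 131072 and
	 * max_xfer = 5000, the last of the DIV_ROUND_UP(131072, 5000) = 27
	 * reads would leave 4999 - (131071 % 5000) = 3928 bytes unused;
	 * spreading that waste across the 27 reads trims max_xfer by
	 * 3928 / 27 = 145 down to 4855: still 27 reads, but less over-read.
	 */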
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

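/*
 * In the on-media free bitmap a set bit means the slot is free:
 * allocation clears the bit in the staging (next) index and freeing
 * sets it again.  Both paths expect the nvdimm bus lock to be held.
 */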
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;
	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

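/*
 * Write out an index block with an updated sequence number and free
 * bitmap, then (outside of initialization) rotate ns_current/ns_next so
 * the freshly written block becomes the active index.
 */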
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
	guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

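/*
 * Write an updated pmem namespace label into a free slot of the staging
 * (next) index, garbage collect any prior label for the same uuid (or
 * one explicitly marked ND_LABEL_REAP), and commit the change by
 * writing the next index with a bumped sequence number.
 */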
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is to
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
						old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		bitmap_free(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "allocated: %d\n", slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

 out:
	kfree(old_res_list);
	bitmap_free(victim_map);
	return rc;

 abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

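/*
 * Ensure nd_mapping->labels has at least num_labels tracking entries,
 * preserving any existing entries for later garbage collection.  If the
 * dimm has no valid index blocks yet, write an initial pair (seq 3 and
 * seq 2, so index 0 wins best_seq() and becomes current).
 */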
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

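/*
 * Release every label slot belonging to @uuid in the staging index,
 * detach the corresponding tracking entries, and commit the deletion
 * via a new index write.
 */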
static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

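/*
 * Two-pass update per UEFI 2.7: first write each mapping's label with
 * NSLABEL_FLAG_UPDATING set, then rewrite all of them with the flag
 * cleared once every member of the interleave set has landed.
 */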
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}

int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
	return 0;
}