2015-06-09 23:09:36 +03:00
/*
 * Copyright (c) 2013-2015, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
# include <linux/device.h>
# include <linux/ndctl.h>
2015-05-30 19:36:02 +03:00
# include <linux/slab.h>
2015-06-09 23:09:36 +03:00
# include <linux/io.h>
# include <linux/nd.h>
# include "nd-core.h"
# include "label.h"
# include "nd.h"
/*
 * Pick the more recent of two index-block sequence numbers within the
 * 2-bit cyclic sequence space; 0 marks an invalid/uninitialized index.
 */
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}
2017-06-03 12:30:43 +03:00
unsigned sizeof_namespace_label ( struct nvdimm_drvdata * ndd )
{
return ndd - > nslabel_size ;
}
2015-06-09 23:09:36 +03:00
size_t sizeof_namespace_index ( struct nvdimm_drvdata * ndd )
{
u32 index_span ;
if ( ndd - > nsindex_size )
return ndd - > nsindex_size ;
/*
* The minimum index space is 512 bytes , with that amount of
* index we can describe ~ 1400 labels which is less than a byte
* of overhead per label . Round up to a byte of overhead per
* label and determine the size of the index region . Yes , this
* starts to waste space at larger config_sizes , but it ' s
* unlikely we ' ll ever see anything but 128 K .
*/
2017-06-03 12:30:43 +03:00
index_span = ndd - > nsarea . config_size / ( sizeof_namespace_label ( ndd ) + 1 ) ;
2015-06-09 23:09:36 +03:00
index_span / = NSINDEX_ALIGN * 2 ;
ndd - > nsindex_size = index_span * NSINDEX_ALIGN ;
return ndd - > nsindex_size ;
}
2015-05-30 19:35:36 +03:00
int nvdimm_num_label_slots ( struct nvdimm_drvdata * ndd )
2015-05-30 19:36:02 +03:00
{
2017-06-03 12:30:43 +03:00
return ndd - > nsarea . config_size / ( sizeof_namespace_label ( ndd ) + 1 ) ;
2015-05-30 19:36:02 +03:00
}
2017-06-03 12:30:43 +03:00
static int __nd_label_validate ( struct nvdimm_drvdata * ndd )
2015-06-09 23:09:36 +03:00
{
/*
* On media label format consists of two index blocks followed
* by an array of labels . None of these structures are ever
* updated in place . A sequence number tracks the current
* active index and the next one to write , while labels are
* written to free slots .
*
* + - - - - - - - - - - - - +
* | |
* | nsindex0 |
* | |
* + - - - - - - - - - - - - +
* | |
* | nsindex1 |
* | |
* + - - - - - - - - - - - - +
* | label0 |
* + - - - - - - - - - - - - +
* | label1 |
* + - - - - - - - - - - - - +
* | |
* . . . . nslot . . .
* | |
* + - - - - - - - - - - - - +
* | labelN |
* + - - - - - - - - - - - - +
*/
struct nd_namespace_index * nsindex [ ] = {
to_namespace_index ( ndd , 0 ) ,
to_namespace_index ( ndd , 1 ) ,
} ;
const int num_index = ARRAY_SIZE ( nsindex ) ;
struct device * dev = ndd - > dev ;
bool valid [ 2 ] = { 0 } ;
int i , num_valid = 0 ;
u32 seq ;
for ( i = 0 ; i < num_index ; i + + ) {
u32 nslot ;
u8 sig [ NSINDEX_SIG_LEN ] ;
u64 sum_save , sum , size ;
2017-06-03 12:30:43 +03:00
unsigned int version , labelsize ;
2015-06-09 23:09:36 +03:00
memcpy ( sig , nsindex [ i ] - > sig , NSINDEX_SIG_LEN ) ;
if ( memcmp ( sig , NSINDEX_SIGNATURE , NSINDEX_SIG_LEN ) ! = 0 ) {
dev_dbg ( dev , " %s: nsindex%d signature invalid \n " ,
__func__ , i ) ;
continue ;
}
2017-06-03 12:30:43 +03:00
/* label sizes larger than 128 arrived with v1.2 */
version = __le16_to_cpu ( nsindex [ i ] - > major ) * 100
+ __le16_to_cpu ( nsindex [ i ] - > minor ) ;
if ( version > = 102 )
labelsize = 1 < < ( 7 + nsindex [ i ] - > labelsize ) ;
else
labelsize = 128 ;
if ( labelsize ! = sizeof_namespace_label ( ndd ) ) {
dev_dbg ( dev , " %s: nsindex%d labelsize %d invalid \n " ,
__func__ , i , nsindex [ i ] - > labelsize ) ;
continue ;
}
2015-06-09 23:09:36 +03:00
sum_save = __le64_to_cpu ( nsindex [ i ] - > checksum ) ;
nsindex [ i ] - > checksum = __cpu_to_le64 ( 0 ) ;
sum = nd_fletcher64 ( nsindex [ i ] , sizeof_namespace_index ( ndd ) , 1 ) ;
nsindex [ i ] - > checksum = __cpu_to_le64 ( sum_save ) ;
if ( sum ! = sum_save ) {
dev_dbg ( dev , " %s: nsindex%d checksum invalid \n " ,
__func__ , i ) ;
continue ;
}
seq = __le32_to_cpu ( nsindex [ i ] - > seq ) ;
if ( ( seq & NSINDEX_SEQ_MASK ) = = 0 ) {
dev_dbg ( dev , " %s: nsindex%d sequence: %#x invalid \n " ,
__func__ , i , seq ) ;
continue ;
}
/* sanity check the index against expected values */
if ( __le64_to_cpu ( nsindex [ i ] - > myoff )
! = i * sizeof_namespace_index ( ndd ) ) {
dev_dbg ( dev , " %s: nsindex%d myoff: %#llx invalid \n " ,
__func__ , i , ( unsigned long long )
__le64_to_cpu ( nsindex [ i ] - > myoff ) ) ;
continue ;
}
if ( __le64_to_cpu ( nsindex [ i ] - > otheroff )
! = ( ! i ) * sizeof_namespace_index ( ndd ) ) {
dev_dbg ( dev , " %s: nsindex%d otheroff: %#llx invalid \n " ,
__func__ , i , ( unsigned long long )
__le64_to_cpu ( nsindex [ i ] - > otheroff ) ) ;
continue ;
}
size = __le64_to_cpu ( nsindex [ i ] - > mysize ) ;
if ( size > sizeof_namespace_index ( ndd )
| | size < sizeof ( struct nd_namespace_index ) ) {
dev_dbg ( dev , " %s: nsindex%d mysize: %#llx invalid \n " ,
__func__ , i , size ) ;
continue ;
}
nslot = __le32_to_cpu ( nsindex [ i ] - > nslot ) ;
2017-06-03 12:30:43 +03:00
if ( nslot * sizeof_namespace_label ( ndd )
2015-06-09 23:09:36 +03:00
+ 2 * sizeof_namespace_index ( ndd )
> ndd - > nsarea . config_size ) {
dev_dbg ( dev , " %s: nsindex%d nslot: %u invalid, config_size: %#x \n " ,
__func__ , i , nslot ,
ndd - > nsarea . config_size ) ;
continue ;
}
valid [ i ] = true ;
num_valid + + ;
}
switch ( num_valid ) {
case 0 :
break ;
case 1 :
for ( i = 0 ; i < num_index ; i + + )
if ( valid [ i ] )
return i ;
/* can't have num_valid > 0 but valid[] = { false, false } */
WARN_ON ( 1 ) ;
break ;
default :
/* pick the best index... */
seq = best_seq ( __le32_to_cpu ( nsindex [ 0 ] - > seq ) ,
__le32_to_cpu ( nsindex [ 1 ] - > seq ) ) ;
if ( seq = = ( __le32_to_cpu ( nsindex [ 1 ] - > seq ) & NSINDEX_SEQ_MASK ) )
return 1 ;
else
return 0 ;
break ;
}
return - 1 ;
}
2017-06-03 12:30:43 +03:00
int nd_label_validate ( struct nvdimm_drvdata * ndd )
{
/*
* In order to probe for and validate namespace index blocks we
* need to know the size of the labels , and we can ' t trust the
* size of the labels until we validate the index blocks .
* Resolve this dependency loop by probing for known label
* sizes .
*/
int label_size [ ] = { 256 , 128 } ;
int i , rc ;
for ( i = 0 ; i < ARRAY_SIZE ( label_size ) ; i + + ) {
ndd - > nslabel_size = label_size [ i ] ;
rc = __nd_label_validate ( ndd ) ;
if ( rc > = 0 )
return rc ;
}
return - 1 ;
}
2015-06-09 23:09:36 +03:00
/* Copy one full index block to another; no-op if either side is NULL. */
void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}
/* Labels start immediately after the two index blocks. */
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}
2015-05-30 19:36:02 +03:00
/* Convert a label pointer back to its slot number in the label array. */
static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label = (unsigned long) nd_label;
	unsigned long base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}
/* Convert a slot number to a pointer into the label array. */
static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long base = (unsigned long) nd_label_base(ndd);

	return (struct nd_namespace_label *)
		(base + sizeof_namespace_label(ndd) * slot);
}
2015-06-09 23:09:36 +03:00
/* Iterate over clear (i.e. in-use label) bits in a little-endian bitmap. */
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
/**
2015-05-30 19:36:02 +03:00
* preamble_index - common variable initialization for nd_label_ * routines
2015-06-09 23:09:36 +03:00
* @ ndd : dimm container for the relevant label set
2015-05-30 19:36:02 +03:00
* @ idx : namespace_index index
2015-06-09 23:09:36 +03:00
* @ nsindex_out : on return set to the currently active namespace index
* @ free : on return set to the free label bitmap in the index
* @ nslot : on return set to the number of slots in the label space
*/
2015-05-30 19:36:02 +03:00
static bool preamble_index ( struct nvdimm_drvdata * ndd , int idx ,
2015-06-09 23:09:36 +03:00
struct nd_namespace_index * * nsindex_out ,
unsigned long * * free , u32 * nslot )
{
struct nd_namespace_index * nsindex ;
2015-05-30 19:36:02 +03:00
nsindex = to_namespace_index ( ndd , idx ) ;
2015-06-09 23:09:36 +03:00
if ( nsindex = = NULL )
return false ;
* free = ( unsigned long * ) nsindex - > free ;
* nslot = __le32_to_cpu ( nsindex - > nslot ) ;
* nsindex_out = nsindex ;
return true ;
}
2015-06-18 00:14:46 +03:00
/* Format a "<blk|pmem>-<uuid>" identifier into @label_id; NULL on bad args. */
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;

	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}
2015-05-30 19:36:02 +03:00
/* preamble_index() against the currently active index block. */
static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex, free, nslot);
}
/* preamble_index() against the staging (next-to-write) index block. */
static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex, free, nslot);
}
2017-06-07 00:56:43 +03:00
/*
 * Validate a label at @slot: self-referencing slot number, 4K-aligned
 * DPA extent, and (for label formats that carry one) a good checksum.
 */
static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		/* fletcher64 is computed with the checksum field zeroed */
		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n",
				__func__, slot, sum);
			return false;
		}
	}

	return true;
}
/*
 * Walk the active labels and reserve their DPA extents in the dimm's
 * resource tracking; returns -EBUSY if a reservation fails.
 */
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}
2015-06-18 00:14:46 +03:00
/* Count the valid labels in the currently active index. */
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"%s: slot%d invalid slot: %d dpa: %llx size: %llx\n",
					__func__, slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}
/* Return the n'th valid label in the active index, or NULL. */
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}
2015-05-30 19:36:02 +03:00
2015-05-30 19:35:36 +03:00
u32 nd_label_alloc_slot ( struct nvdimm_drvdata * ndd )
2015-05-30 19:36:02 +03:00
{
struct nd_namespace_index * nsindex ;
unsigned long * free ;
u32 nslot , slot ;
if ( ! preamble_next ( ndd , & nsindex , & free , & nslot ) )
return UINT_MAX ;
WARN_ON ( ! is_nvdimm_bus_locked ( ndd - > dev ) ) ;
slot = find_next_bit_le ( free , nslot , 0 ) ;
if ( slot = = nslot )
return UINT_MAX ;
clear_bit_le ( slot , free ) ;
return slot ;
}
2015-05-30 19:35:36 +03:00
/*
 * Release a slot in the staging index; true if the slot was in use.
 * Caller holds the bus lock.
 */
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}
u32 nd_label_nfree ( struct nvdimm_drvdata * ndd )
{
struct nd_namespace_index * nsindex ;
unsigned long * free ;
u32 nslot ;
WARN_ON ( ! is_nvdimm_bus_locked ( ndd - > dev ) ) ;
if ( ! preamble_next ( ndd , & nsindex , & free , & nslot ) )
2015-05-30 19:35:36 +03:00
return nvdimm_num_label_slots ( ndd ) ;
2015-05-30 19:36:02 +03:00
return bitmap_weight ( free , nslot ) ;
}
/*
 * Fill in and write out the index block at @index with sequence number
 * @seq. With ND_NSINDEX_INIT the free bitmap is (re)initialized to
 * all-free; otherwise the freshly written index is promoted to the
 * current index and the current/next cursors are rotated.
 */
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	/* labelsize encodes a 1 << (7 + n) byte label size */
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	nsindex->minor = __cpu_to_le16(1);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		/* mark every slot free, then clear the padding bits */
		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
/* Byte offset of @nd_label from the start of the label config area. */
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}
static int __pmem_label_update ( struct nd_region * nd_region ,
struct nd_mapping * nd_mapping , struct nd_namespace_pmem * nspm ,
int pos )
{
2017-06-06 21:10:51 +03:00
struct nd_interleave_set * nd_set = nd_region - > nd_set ;
2015-05-30 19:36:02 +03:00
struct nvdimm_drvdata * ndd = to_ndd ( nd_mapping ) ;
2016-10-06 07:13:23 +03:00
struct nd_label_ent * label_ent , * victim = NULL ;
2015-05-30 19:36:02 +03:00
struct nd_namespace_label * nd_label ;
struct nd_namespace_index * nsindex ;
2016-10-06 07:13:23 +03:00
struct nd_label_id label_id ;
struct resource * res ;
2015-05-30 19:36:02 +03:00
unsigned long * free ;
u32 nslot , slot ;
size_t offset ;
2017-06-04 04:59:15 +03:00
u64 cookie ;
2015-05-30 19:36:02 +03:00
int rc ;
if ( ! preamble_next ( ndd , & nsindex , & free , & nslot ) )
return - ENXIO ;
2017-06-04 04:59:15 +03:00
cookie = nd_region_interleave_set_cookie ( nd_region , nsindex ) ;
2016-10-06 07:13:23 +03:00
nd_label_gen_id ( & label_id , nspm - > uuid , 0 ) ;
for_each_dpa_resource ( ndd , res )
if ( strcmp ( res - > name , label_id . id ) = = 0 )
break ;
if ( ! res ) {
WARN_ON_ONCE ( 1 ) ;
return - ENXIO ;
}
2015-05-30 19:36:02 +03:00
/* allocate and write the label to the staging (next) index */
slot = nd_label_alloc_slot ( ndd ) ;
if ( slot = = UINT_MAX )
return - ENXIO ;
dev_dbg ( ndd - > dev , " %s: allocated: %d \n " , __func__ , slot ) ;
2017-06-03 12:30:43 +03:00
nd_label = to_label ( ndd , slot ) ;
memset ( nd_label , 0 , sizeof_namespace_label ( ndd ) ) ;
2015-05-30 19:36:02 +03:00
memcpy ( nd_label - > uuid , nspm - > uuid , NSLABEL_UUID_LEN ) ;
if ( nspm - > alt_name )
memcpy ( nd_label - > name , nspm - > alt_name , NSLABEL_NAME_LEN ) ;
nd_label - > flags = __cpu_to_le32 ( NSLABEL_FLAG_UPDATING ) ;
nd_label - > nlabel = __cpu_to_le16 ( nd_region - > ndr_mappings ) ;
nd_label - > position = __cpu_to_le16 ( pos ) ;
nd_label - > isetcookie = __cpu_to_le64 ( cookie ) ;
2016-10-06 07:13:23 +03:00
nd_label - > rawsize = __cpu_to_le64 ( resource_size ( res ) ) ;
nd_label - > dpa = __cpu_to_le64 ( res - > start ) ;
2015-05-30 19:36:02 +03:00
nd_label - > slot = __cpu_to_le32 ( slot ) ;
2017-06-06 21:10:51 +03:00
if ( namespace_label_has ( ndd , type_guid ) )
guid_copy ( & nd_label - > type_guid , & nd_set - > type_guid ) ;
2017-06-07 00:56:43 +03:00
if ( namespace_label_has ( ndd , checksum ) ) {
u64 sum ;
nd_label - > checksum = __cpu_to_le64 ( 0 ) ;
sum = nd_fletcher64 ( nd_label , sizeof_namespace_label ( ndd ) , 1 ) ;
nd_label - > checksum = __cpu_to_le64 ( sum ) ;
}
2016-10-06 07:13:23 +03:00
nd_dbg_dpa ( nd_region , ndd , res , " %s \n " , __func__ ) ;
2015-05-30 19:36:02 +03:00
/* update label */
offset = nd_label_offset ( ndd , nd_label ) ;
rc = nvdimm_set_config_data ( ndd , offset , nd_label ,
2017-06-03 12:30:43 +03:00
sizeof_namespace_label ( ndd ) ) ;
2015-05-30 19:36:02 +03:00
if ( rc < 0 )
return rc ;
/* Garbage collect the previous label */
2016-09-20 02:04:21 +03:00
mutex_lock ( & nd_mapping - > lock ) ;
2016-10-06 07:13:23 +03:00
list_for_each_entry ( label_ent , & nd_mapping - > labels , list ) {
if ( ! label_ent - > label )
continue ;
if ( memcmp ( nspm - > uuid , label_ent - > label - > uuid ,
NSLABEL_UUID_LEN ) ! = 0 )
continue ;
victim = label_ent ;
list_move_tail ( & victim - > list , & nd_mapping - > labels ) ;
break ;
}
if ( victim ) {
2015-05-30 19:36:02 +03:00
dev_dbg ( ndd - > dev , " %s: free: %d \n " , __func__ , slot ) ;
2016-10-06 07:13:23 +03:00
slot = to_slot ( ndd , victim - > label ) ;
nd_label_free_slot ( ndd , slot ) ;
victim - > label = NULL ;
2015-05-30 19:36:02 +03:00
}
/* update index */
rc = nd_label_write_index ( ndd , ndd - > ns_next ,
nd_inc_seq ( __le32_to_cpu ( nsindex - > seq ) ) , 0 ) ;
2016-10-06 07:13:23 +03:00
if ( rc = = 0 ) {
list_for_each_entry ( label_ent , & nd_mapping - > labels , list )
if ( ! label_ent - > label ) {
label_ent - > label = nd_label ;
nd_label = NULL ;
break ;
}
dev_WARN_ONCE ( & nspm - > nsio . common . dev , nd_label ,
" failed to track label: %d \n " ,
to_slot ( ndd , nd_label ) ) ;
if ( nd_label )
rc = - ENXIO ;
}
2016-09-20 02:04:21 +03:00
mutex_unlock ( & nd_mapping - > lock ) ;
2015-05-30 19:35:36 +03:00
2016-09-20 02:04:21 +03:00
return rc ;
2015-05-30 19:35:36 +03:00
}
static bool is_old_resource ( struct resource * res , struct resource * * list , int n )
2015-05-30 19:36:02 +03:00
{
int i ;
2015-05-30 19:35:36 +03:00
if ( res - > flags & DPA_RESOURCE_ADJUSTED )
return false ;
for ( i = 0 ; i < n ; i + + )
if ( res = = list [ i ] )
return true ;
return false ;
}
/* Find the dpa resource matching @nd_label's extent exactly, or NULL. */
static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}
/*
* 1 / Account all the labels that can be freed after this update
* 2 / Allocate and write the label to the staging ( next ) index
* 3 / Record the resources in the namespace device
*/
static int __blk_label_update ( struct nd_region * nd_region ,
struct nd_mapping * nd_mapping , struct nd_namespace_blk * nsblk ,
int num_labels )
{
2016-09-20 02:04:21 +03:00
int i , alloc , victims , nfree , old_num_resources , nlabel , rc = - ENXIO ;
2017-06-06 21:10:51 +03:00
struct nd_interleave_set * nd_set = nd_region - > nd_set ;
2015-05-30 19:35:36 +03:00
struct nvdimm_drvdata * ndd = to_ndd ( nd_mapping ) ;
struct nd_namespace_label * nd_label ;
2016-09-20 02:04:21 +03:00
struct nd_label_ent * label_ent , * e ;
2015-05-30 19:35:36 +03:00
struct nd_namespace_index * nsindex ;
unsigned long * free , * victim_map = NULL ;
struct resource * res , * * old_res_list ;
struct nd_label_id label_id ;
u8 uuid [ NSLABEL_UUID_LEN ] ;
2017-06-07 00:59:04 +03:00
int min_dpa_idx = 0 ;
2016-09-20 02:04:21 +03:00
LIST_HEAD ( list ) ;
2015-05-30 19:35:36 +03:00
u32 nslot , slot ;
if ( ! preamble_next ( ndd , & nsindex , & free , & nslot ) )
return - ENXIO ;
old_res_list = nsblk - > res ;
nfree = nd_label_nfree ( ndd ) ;
old_num_resources = nsblk - > num_resources ;
nd_label_gen_id ( & label_id , nsblk - > uuid , NSLABEL_FLAG_LOCAL ) ;
/*
* We need to loop over the old resources a few times , which seems a
* bit inefficient , but we need to know that we have the label
* space before we start mutating the tracking structures .
* Otherwise the recovery method of last resort for userspace is
* disable and re - enable the parent region .
*/
alloc = 0 ;
for_each_dpa_resource ( ndd , res ) {
if ( strcmp ( res - > name , label_id . id ) ! = 0 )
continue ;
if ( ! is_old_resource ( res , old_res_list , old_num_resources ) )
alloc + + ;
}
victims = 0 ;
if ( old_num_resources ) {
/* convert old local-label-map to dimm-slot victim-map */
victim_map = kcalloc ( BITS_TO_LONGS ( nslot ) , sizeof ( long ) ,
GFP_KERNEL ) ;
if ( ! victim_map )
return - ENOMEM ;
/* mark unused labels for garbage collection */
for_each_clear_bit_le ( slot , free , nslot ) {
2017-06-03 12:30:43 +03:00
nd_label = to_label ( ndd , slot ) ;
2015-05-30 19:35:36 +03:00
memcpy ( uuid , nd_label - > uuid , NSLABEL_UUID_LEN ) ;
if ( memcmp ( uuid , nsblk - > uuid , NSLABEL_UUID_LEN ) ! = 0 )
continue ;
res = to_resource ( ndd , nd_label ) ;
if ( res & & is_old_resource ( res , old_res_list ,
old_num_resources ) )
continue ;
slot = to_slot ( ndd , nd_label ) ;
set_bit ( slot , victim_map ) ;
victims + + ;
}
}
/* don't allow updates that consume the last label */
if ( nfree - alloc < 0 | | nfree - alloc + victims < 1 ) {
2015-06-25 11:20:04 +03:00
dev_info ( & nsblk - > common . dev , " insufficient label space \n " ) ;
2015-05-30 19:35:36 +03:00
kfree ( victim_map ) ;
return - ENOSPC ;
}
/* from here on we need to abort on error */
/* assign all resources to the namespace before writing the labels */
nsblk - > res = NULL ;
nsblk - > num_resources = 0 ;
for_each_dpa_resource ( ndd , res ) {
if ( strcmp ( res - > name , label_id . id ) ! = 0 )
continue ;
if ( ! nsblk_add_resource ( nd_region , ndd , nsblk , res - > start ) ) {
rc = - ENOMEM ;
goto abort ;
}
}
2017-06-07 00:59:04 +03:00
/*
* Find the resource associated with the first label in the set
* per the v1 .2 namespace specification .
*/
for ( i = 0 ; i < nsblk - > num_resources ; i + + ) {
struct resource * min = nsblk - > res [ min_dpa_idx ] ;
res = nsblk - > res [ i ] ;
if ( res - > start < min - > start )
min_dpa_idx = i ;
}
2015-05-30 19:35:36 +03:00
for ( i = 0 ; i < nsblk - > num_resources ; i + + ) {
size_t offset ;
res = nsblk - > res [ i ] ;
if ( is_old_resource ( res , old_res_list , old_num_resources ) )
continue ; /* carry-over */
slot = nd_label_alloc_slot ( ndd ) ;
if ( slot = = UINT_MAX )
goto abort ;
dev_dbg ( ndd - > dev , " %s: allocated: %d \n " , __func__ , slot ) ;
2017-06-03 12:30:43 +03:00
nd_label = to_label ( ndd , slot ) ;
memset ( nd_label , 0 , sizeof_namespace_label ( ndd ) ) ;
2015-05-30 19:35:36 +03:00
memcpy ( nd_label - > uuid , nsblk - > uuid , NSLABEL_UUID_LEN ) ;
if ( nsblk - > alt_name )
memcpy ( nd_label - > name , nsblk - > alt_name ,
NSLABEL_NAME_LEN ) ;
nd_label - > flags = __cpu_to_le32 ( NSLABEL_FLAG_LOCAL ) ;
2017-06-06 21:39:30 +03:00
/*
* Use the presence of the type_guid as a flag to
2017-06-07 00:59:04 +03:00
* determine isetcookie usage and nlabel + position
* policy for blk - aperture namespaces .
2017-06-06 21:39:30 +03:00
*/
2017-06-07 00:59:04 +03:00
if ( namespace_label_has ( ndd , type_guid ) ) {
if ( i = = min_dpa_idx ) {
nd_label - > nlabel = __cpu_to_le16 ( nsblk - > num_resources ) ;
nd_label - > position = __cpu_to_le16 ( 0 ) ;
} else {
nd_label - > nlabel = __cpu_to_le16 ( 0xffff ) ;
nd_label - > position = __cpu_to_le16 ( 0xffff ) ;
}
2017-06-06 21:39:30 +03:00
nd_label - > isetcookie = __cpu_to_le64 ( nd_set - > cookie2 ) ;
2017-06-07 00:59:04 +03:00
} else {
nd_label - > nlabel = __cpu_to_le16 ( 0 ) ; /* N/A */
nd_label - > position = __cpu_to_le16 ( 0 ) ; /* N/A */
2017-06-06 21:39:30 +03:00
nd_label - > isetcookie = __cpu_to_le64 ( 0 ) ; /* N/A */
2017-06-07 00:59:04 +03:00
}
2017-06-06 21:39:30 +03:00
2015-05-30 19:35:36 +03:00
nd_label - > dpa = __cpu_to_le64 ( res - > start ) ;
nd_label - > rawsize = __cpu_to_le64 ( resource_size ( res ) ) ;
nd_label - > lbasize = __cpu_to_le64 ( nsblk - > lbasize ) ;
nd_label - > slot = __cpu_to_le32 ( slot ) ;
2017-06-06 21:10:51 +03:00
if ( namespace_label_has ( ndd , type_guid ) )
guid_copy ( & nd_label - > type_guid , & nd_set - > type_guid ) ;
2017-06-07 00:56:43 +03:00
if ( namespace_label_has ( ndd , checksum ) ) {
u64 sum ;
nd_label - > checksum = __cpu_to_le64 ( 0 ) ;
sum = nd_fletcher64 ( nd_label ,
sizeof_namespace_label ( ndd ) , 1 ) ;
nd_label - > checksum = __cpu_to_le64 ( sum ) ;
}
2015-05-30 19:35:36 +03:00
/* update label */
offset = nd_label_offset ( ndd , nd_label ) ;
rc = nvdimm_set_config_data ( ndd , offset , nd_label ,
2017-06-03 12:30:43 +03:00
sizeof_namespace_label ( ndd ) ) ;
2015-05-30 19:35:36 +03:00
if ( rc < 0 )
goto abort ;
}
/* free up now unused slots in the new index */
for_each_set_bit ( slot , victim_map , victim_map ? nslot : 0 ) {
dev_dbg ( ndd - > dev , " %s: free: %d \n " , __func__ , slot ) ;
nd_label_free_slot ( ndd , slot ) ;
}
/* update index */
rc = nd_label_write_index ( ndd , ndd - > ns_next ,
nd_inc_seq ( __le32_to_cpu ( nsindex - > seq ) ) , 0 ) ;
if ( rc )
goto abort ;
/*
* Now that the on - dimm labels are up to date , fix up the tracking
* entries in nd_mapping - > labels
*/
nlabel = 0 ;
2016-09-20 02:04:21 +03:00
mutex_lock ( & nd_mapping - > lock ) ;
list_for_each_entry_safe ( label_ent , e , & nd_mapping - > labels , list ) {
nd_label = label_ent - > label ;
if ( ! nd_label )
continue ;
2015-05-30 19:35:36 +03:00
nlabel + + ;
memcpy ( uuid , nd_label - > uuid , NSLABEL_UUID_LEN ) ;
if ( memcmp ( uuid , nsblk - > uuid , NSLABEL_UUID_LEN ) ! = 0 )
continue ;
nlabel - - ;
2016-09-20 02:04:21 +03:00
list_move ( & label_ent - > list , & list ) ;
label_ent - > label = NULL ;
2015-05-30 19:35:36 +03:00
}
2016-09-20 02:04:21 +03:00
list_splice_tail_init ( & list , & nd_mapping - > labels ) ;
mutex_unlock ( & nd_mapping - > lock ) ;
2015-05-30 19:35:36 +03:00
if ( nlabel + nsblk - > num_resources > num_labels ) {
/*
* Bug , we can ' t end up with more resources than
* available labels
*/
WARN_ON_ONCE ( 1 ) ;
rc = - ENXIO ;
goto out ;
}
2016-09-20 02:04:21 +03:00
mutex_lock ( & nd_mapping - > lock ) ;
label_ent = list_first_entry_or_null ( & nd_mapping - > labels ,
typeof ( * label_ent ) , list ) ;
if ( ! label_ent ) {
WARN_ON ( 1 ) ;
mutex_unlock ( & nd_mapping - > lock ) ;
rc = - ENXIO ;
goto out ;
}
2015-05-30 19:35:36 +03:00
for_each_clear_bit_le ( slot , free , nslot ) {
2017-06-03 12:30:43 +03:00
nd_label = to_label ( ndd , slot ) ;
2015-05-30 19:35:36 +03:00
memcpy ( uuid , nd_label - > uuid , NSLABEL_UUID_LEN ) ;
if ( memcmp ( uuid , nsblk - > uuid , NSLABEL_UUID_LEN ) ! = 0 )
continue ;
res = to_resource ( ndd , nd_label ) ;
res - > flags & = ~ DPA_RESOURCE_ADJUSTED ;
2016-09-20 02:04:21 +03:00
dev_vdbg ( & nsblk - > common . dev , " assign label slot: %d \n " , slot ) ;
list_for_each_entry_from ( label_ent , & nd_mapping - > labels , list ) {
if ( label_ent - > label )
continue ;
label_ent - > label = nd_label ;
nd_label = NULL ;
break ;
}
if ( nd_label )
dev_WARN ( & nsblk - > common . dev ,
" failed to track label slot%d \n " , slot ) ;
2015-05-30 19:35:36 +03:00
}
2016-09-20 02:04:21 +03:00
mutex_unlock ( & nd_mapping - > lock ) ;
2015-05-30 19:35:36 +03:00
out :
kfree ( old_res_list ) ;
kfree ( victim_map ) ;
return rc ;
abort :
/*
* 1 / repair the allocated label bitmap in the index
* 2 / restore the resource list
*/
nd_label_copy ( ndd , nsindex , to_current_namespace_index ( ndd ) ) ;
kfree ( nsblk - > res ) ;
nsblk - > res = old_res_list ;
nsblk - > num_resources = old_num_resources ;
old_res_list = NULL ;
goto out ;
}
static int init_labels ( struct nd_mapping * nd_mapping , int num_labels )
{
2016-09-20 02:04:21 +03:00
int i , old_num_labels = 0 ;
struct nd_label_ent * label_ent ;
2015-05-30 19:36:02 +03:00
struct nd_namespace_index * nsindex ;
struct nvdimm_drvdata * ndd = to_ndd ( nd_mapping ) ;
2016-09-20 02:04:21 +03:00
mutex_lock ( & nd_mapping - > lock ) ;
list_for_each_entry ( label_ent , & nd_mapping - > labels , list )
2015-05-30 19:35:36 +03:00
old_num_labels + + ;
2016-09-20 02:04:21 +03:00
mutex_unlock ( & nd_mapping - > lock ) ;
2015-05-30 19:36:02 +03:00
2015-05-30 19:35:36 +03:00
/*
* We need to preserve all the old labels for the mapping so
* they can be garbage collected after writing the new labels .
*/
2016-09-20 02:04:21 +03:00
for ( i = old_num_labels ; i < num_labels ; i + + ) {
label_ent = kzalloc ( sizeof ( * label_ent ) , GFP_KERNEL ) ;
if ( ! label_ent )
2015-05-30 19:35:36 +03:00
return - ENOMEM ;
2016-09-20 02:04:21 +03:00
mutex_lock ( & nd_mapping - > lock ) ;
list_add_tail ( & label_ent - > list , & nd_mapping - > labels ) ;
mutex_unlock ( & nd_mapping - > lock ) ;
2015-05-30 19:35:36 +03:00
}
2015-05-30 19:36:02 +03:00
if ( ndd - > ns_current = = - 1 | | ndd - > ns_next = = - 1 )
/* pass */ ;
else
2015-05-30 19:35:36 +03:00
return max ( num_labels , old_num_labels ) ;
2015-05-30 19:36:02 +03:00
nsindex = to_namespace_index ( ndd , 0 ) ;
memset ( nsindex , 0 , ndd - > nsarea . config_size ) ;
for ( i = 0 ; i < 2 ; i + + ) {
int rc = nd_label_write_index ( ndd , i , i * 2 , ND_NSINDEX_INIT ) ;
if ( rc )
return rc ;
}
ndd - > ns_next = 1 ;
ndd - > ns_current = 0 ;
2015-05-30 19:35:36 +03:00
return max ( num_labels , old_num_labels ) ;
2015-05-30 19:36:02 +03:00
}
/*
 * Release every label on @nd_mapping whose uuid matches @uuid: free the
 * corresponding slots in the next index block and clear the tracking
 * entries.  If no active labels remain, drop the tracking list entirely.
 * Finishes by writing out the updated namespace index; returns its status.
 */
static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *ent, *next;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(deleted);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(ent, next, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		/* matched: this one is going away, not staying active */
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		list_move_tail(&ent->list, &deleted);
		ent->label = NULL;
	}
	/* keep the now-empty entries around for reuse */
	list_splice_tail_init(&deleted, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "%s: no more active labels\n", __func__);
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}
/*
 * Write (or, for size == 0, delete) the pmem namespace label set for
 * @nspm across every mapping in @nd_region.  Returns 0 on success or a
 * negative errno from the first mapping that fails.
 */
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int map;

	for (map = 0; map < nd_region->ndr_mappings; map++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[map];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int rc, npmem = 0;

		/* zero size == tear down this namespace's labels */
		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		/* one label per allocated pmem dpa resource */
		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				npmem++;
		WARN_ON_ONCE(!npmem);

		rc = init_labels(nd_mapping, npmem);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, map);
		if (rc)
			return rc;
	}

	return 0;
}
2015-05-30 19:35:36 +03:00
/*
 * Write (or, for size == 0, delete) the blk namespace label set for
 * @nsblk.  Blk namespaces use only the region's first mapping.  Returns
 * 0 on success or a negative errno.
 */
int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int num_labels = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	/* reserve one tracking entry per allocated dpa resource */
	for_each_dpa_resource(to_ndd(nd_mapping), res)
		num_labels++;

	num_labels = init_labels(nd_mapping, num_labels);
	if (num_labels < 0)
		return num_labels;

	return __blk_label_update(nd_region, nd_mapping, nsblk, num_labels);
}