// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/badblocks.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"
void __nd_detach_ndns ( struct device * dev , struct nd_namespace_common * * _ndns )
{
struct nd_namespace_common * ndns = * _ndns ;
2017-04-29 08:05:14 +03:00
struct nvdimm_bus * nvdimm_bus ;
2015-07-31 00:57:47 +03:00
2017-04-29 08:05:14 +03:00
if ( ! ndns )
return ;
nvdimm_bus = walk_to_nvdimm_bus ( & ndns - > dev ) ;
lockdep_assert_held ( & nvdimm_bus - > reconfig_mutex ) ;
2016-12-16 07:04:31 +03:00
dev_WARN_ONCE ( dev , ndns - > claim ! = dev , " %s: invalid claim \n " , __func__ ) ;
2015-07-31 00:57:47 +03:00
ndns - > claim = NULL ;
* _ndns = NULL ;
put_device ( & ndns - > dev ) ;
}
void nd_detach_ndns ( struct device * dev ,
struct nd_namespace_common * * _ndns )
{
struct nd_namespace_common * ndns = * _ndns ;
if ( ! ndns )
return ;
get_device ( & ndns - > dev ) ;
2017-04-29 08:05:14 +03:00
nvdimm_bus_lock ( & ndns - > dev ) ;
2015-07-31 00:57:47 +03:00
__nd_detach_ndns ( dev , _ndns ) ;
2017-04-29 08:05:14 +03:00
nvdimm_bus_unlock ( & ndns - > dev ) ;
2015-07-31 00:57:47 +03:00
put_device ( & ndns - > dev ) ;
}
bool __nd_attach_ndns ( struct device * dev , struct nd_namespace_common * attach ,
struct nd_namespace_common * * _ndns )
{
2017-04-29 08:05:14 +03:00
struct nvdimm_bus * nvdimm_bus = walk_to_nvdimm_bus ( & attach - > dev ) ;
2015-07-31 00:57:47 +03:00
if ( attach - > claim )
return false ;
2017-04-29 08:05:14 +03:00
lockdep_assert_held ( & nvdimm_bus - > reconfig_mutex ) ;
2016-12-16 07:04:31 +03:00
dev_WARN_ONCE ( dev , * _ndns , " %s: invalid claim \n " , __func__ ) ;
2015-07-31 00:57:47 +03:00
attach - > claim = dev ;
* _ndns = attach ;
get_device ( & attach - > dev ) ;
return true ;
}
bool nd_attach_ndns ( struct device * dev , struct nd_namespace_common * attach ,
struct nd_namespace_common * * _ndns )
{
bool claimed ;
2017-04-29 08:05:14 +03:00
nvdimm_bus_lock ( & attach - > dev ) ;
2015-07-31 00:57:47 +03:00
claimed = __nd_attach_ndns ( dev , attach , _ndns ) ;
2017-04-29 08:05:14 +03:00
nvdimm_bus_unlock ( & attach - > dev ) ;
2015-07-31 00:57:47 +03:00
return claimed ;
}
/* device_find_child() callback: match a child device by its exact name. */
static int namespace_match(struct device *dev, void *data)
{
	const char *name = data;

	return strcmp(name, dev_name(dev)) == 0;
}
static bool is_idle ( struct device * dev , struct nd_namespace_common * ndns )
{
struct nd_region * nd_region = to_nd_region ( dev - > parent ) ;
struct device * seed = NULL ;
if ( is_nd_btt ( dev ) )
seed = nd_region - > btt_seed ;
else if ( is_nd_pfn ( dev ) )
seed = nd_region - > pfn_seed ;
2016-03-11 21:15:36 +03:00
else if ( is_nd_dax ( dev ) )
seed = nd_region - > dax_seed ;
2015-07-31 00:57:47 +03:00
if ( seed = = dev | | ndns | | dev - > driver )
return false ;
return true ;
}
2016-05-21 22:22:41 +03:00
struct nd_pfn * to_nd_pfn_safe ( struct device * dev )
{
/*
* pfn device attributes are re - used by dax device instances , so we
* need to be careful to correct device - to - nd_pfn conversion .
*/
if ( is_nd_pfn ( dev ) )
return to_nd_pfn ( dev ) ;
if ( is_nd_dax ( dev ) ) {
struct nd_dax * nd_dax = to_nd_dax ( dev ) ;
return & nd_dax - > nd_pfn ;
}
WARN_ON ( 1 ) ;
return NULL ;
}
2015-07-31 00:57:47 +03:00
static void nd_detach_and_reset ( struct device * dev ,
struct nd_namespace_common * * _ndns )
{
/* detach the namespace and destroy / reset the device */
2017-04-29 08:05:14 +03:00
__nd_detach_ndns ( dev , _ndns ) ;
2015-07-31 00:57:47 +03:00
if ( is_idle ( dev , * _ndns ) ) {
nd_device_unregister ( dev , ND_ASYNC ) ;
} else if ( is_nd_btt ( dev ) ) {
struct nd_btt * nd_btt = to_nd_btt ( dev ) ;
nd_btt - > lbasize = 0 ;
kfree ( nd_btt - > uuid ) ;
nd_btt - > uuid = NULL ;
2016-05-21 22:22:41 +03:00
} else if ( is_nd_pfn ( dev ) | | is_nd_dax ( dev ) ) {
struct nd_pfn * nd_pfn = to_nd_pfn_safe ( dev ) ;
2015-07-31 00:57:47 +03:00
kfree ( nd_pfn - > uuid ) ;
nd_pfn - > uuid = NULL ;
nd_pfn - > mode = PFN_MODE_NONE ;
}
}
/*
 * nd_namespace_store - sysfs handler to (de)associate a namespace with @dev
 * @dev: claiming device (btt, pfn, or dax instance)
 * @_ndns: pointer to the device's namespace slot
 * @buf: user-supplied namespace name ("namespaceX.Y"), or "" to detach
 * @len: length of @buf
 *
 * Returns @len on success or a negative errno: -EBUSY when the device is
 * active or the target namespace is already claimed / of the wrong claim
 * class, -EINVAL for a malformed name, -ENODEV when no such namespace
 * exists, -ENXIO when the namespace is too small, -ENOMEM on allocation
 * failure.  Caller must hold the nvdimm bus lock (asserted below).
 *
 * Fix vs. previous revision: removed the unreachable `break` after
 * `goto out_attach` in the default switch case, and replaced the empty
 * "pass" branch with a guard clause.  Behavior is unchanged.
 */
ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len)
{
	struct nd_namespace_common *ndns;
	struct device *found;
	char *name;

	if (dev->driver) {
		dev_dbg(dev, "namespace already active\n");
		return -EBUSY;
	}

	name = kstrndup(buf, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	strim(name);

	/* only "namespace<...>" or the empty string (detach) are valid */
	if (strncmp(name, "namespace", 9) != 0 && strcmp(name, "") != 0) {
		len = -EINVAL;
		goto out;
	}

	ndns = *_ndns;
	if (strcmp(name, "") == 0) {
		nd_detach_and_reset(dev, _ndns);
		goto out;
	} else if (ndns) {
		dev_dbg(dev, "namespace already set to: %s\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
		goto out;
	}

	found = device_find_child(dev->parent, name, namespace_match);
	if (!found) {
		dev_dbg(dev, "'%s' not found under %s\n", name,
				dev_name(dev->parent));
		len = -ENODEV;
		goto out;
	}

	ndns = to_ndns(found);

	/* the namespace's claim class must match the claiming device type */
	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
		break;
	case NVDIMM_CCLASS_BTT:
	case NVDIMM_CCLASS_BTT2:
		if (!is_nd_btt(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	case NVDIMM_CCLASS_PFN:
		if (!is_nd_pfn(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	case NVDIMM_CCLASS_DAX:
		if (!is_nd_dax(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	default:
		len = -EBUSY;
		goto out_attach;
	}

	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
		dev_dbg(dev, "%s too small to host\n", name);
		len = -ENXIO;
		goto out_attach;
	}

	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
	if (!__nd_attach_ndns(dev, ndns, _ndns)) {
		dev_dbg(dev, "%s already claimed\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
	}

out_attach:
	put_device(&ndns->dev); /* from device_find_child */
out:
	kfree(name);
	return len;
}
/*
* nd_sb_checksum : compute checksum for a generic info block
*
* Returns a fletcher64 checksum of everything in the given info block
* except the last field ( since that ' s where the checksum lives ) .
*/
u64 nd_sb_checksum ( struct nd_gen_sb * nd_gen_sb )
{
u64 sum ;
__le64 sum_save ;
BUILD_BUG_ON ( sizeof ( struct btt_sb ) ! = SZ_4K ) ;
BUILD_BUG_ON ( sizeof ( struct nd_pfn_sb ) ! = SZ_4K ) ;
BUILD_BUG_ON ( sizeof ( struct nd_gen_sb ) ! = SZ_4K ) ;
sum_save = nd_gen_sb - > checksum ;
nd_gen_sb - > checksum = 0 ;
sum = nd_fletcher64 ( nd_gen_sb , sizeof ( * nd_gen_sb ) , 1 ) ;
nd_gen_sb - > checksum = sum_save ;
return sum ;
}
EXPORT_SYMBOL ( nd_sb_checksum ) ;
2016-03-22 10:22:16 +03:00
static int nsio_rw_bytes ( struct nd_namespace_common * ndns ,
2017-05-11 00:01:30 +03:00
resource_size_t offset , void * buf , size_t size , int rw ,
unsigned long flags )
2016-03-22 10:22:16 +03:00
{
struct nd_namespace_io * nsio = to_nd_namespace_io ( & ndns - > dev ) ;
2016-11-11 22:37:36 +03:00
unsigned int sz_align = ALIGN ( size + ( offset & ( 512 - 1 ) ) , 512 ) ;
sector_t sector = offset > > 9 ;
2019-07-05 17:03:22 +03:00
int rc = 0 , ret = 0 ;
2016-11-11 22:37:36 +03:00
if ( unlikely ( ! size ) )
return 0 ;
2016-03-22 10:22:16 +03:00
if ( unlikely ( offset + size > nsio - > size ) ) {
dev_WARN_ONCE ( & ndns - > dev , 1 , " request out of range \n " ) ;
return - EFAULT ;
}
if ( rw = = READ ) {
2016-11-11 22:37:36 +03:00
if ( unlikely ( is_bad_pmem ( & nsio - > bb , sector , sz_align ) ) )
2016-03-22 10:22:16 +03:00
return - EIO ;
x86, powerpc: Rename memcpy_mcsafe() to copy_mc_to_{user, kernel}()
In reaction to a proposal to introduce a memcpy_mcsafe_fast()
implementation Linus points out that memcpy_mcsafe() is poorly named
relative to communicating the scope of the interface. Specifically what
addresses are valid to pass as source, destination, and what faults /
exceptions are handled.
Of particular concern is that even though x86 might be able to handle
the semantics of copy_mc_to_user() with its common copy_user_generic()
implementation other archs likely need / want an explicit path for this
case:
On Fri, May 1, 2020 at 11:28 AM Linus Torvalds <torvalds@linux-foundation.org> wrote:
>
> On Thu, Apr 30, 2020 at 6:21 PM Dan Williams <dan.j.williams@intel.com> wrote:
> >
> > However now I see that copy_user_generic() works for the wrong reason.
> > It works because the exception on the source address due to poison
> > looks no different than a write fault on the user address to the
> > caller, it's still just a short copy. So it makes copy_to_user() work
> > for the wrong reason relative to the name.
>
> Right.
>
> And it won't work that way on other architectures. On x86, we have a
> generic function that can take faults on either side, and we use it
> for both cases (and for the "in_user" case too), but that's an
> artifact of the architecture oddity.
>
> In fact, it's probably wrong even on x86 - because it can hide bugs -
> but writing those things is painful enough that everybody prefers
> having just one function.
Replace a single top-level memcpy_mcsafe() with either
copy_mc_to_user(), or copy_mc_to_kernel().
Introduce an x86 copy_mc_fragile() name as the rename for the
low-level x86 implementation formerly named memcpy_mcsafe(). It is used
as the slow / careful backend that is supplanted by a fast
copy_mc_generic() in a follow-on patch.
One side-effect of this reorganization is that separating copy_mc_64.S
to its own file means that perf no longer needs to track dependencies
for its memcpy_64.S benchmarks.
[ bp: Massage a bit. ]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Cc: <stable@vger.kernel.org>
Link: http://lore.kernel.org/r/CAHk-=wjSqtXAqfUJxFtWNwmguFASTgB0dz1dT3V-78Quiezqbg@mail.gmail.com
Link: https://lkml.kernel.org/r/160195561680.2163339.11574962055305783722.stgit@dwillia2-desk3.amr.corp.intel.com
2020-10-06 06:40:16 +03:00
if ( copy_mc_to_kernel ( buf , nsio - > addr + offset , size ) ! = 0 )
2018-05-04 03:06:21 +03:00
return - EIO ;
2018-06-18 20:07:35 +03:00
return 0 ;
2016-12-04 21:45:13 +03:00
}
if ( unlikely ( is_bad_pmem ( & nsio - > bb , sector , sz_align ) ) ) {
2017-05-01 20:00:02 +03:00
if ( IS_ALIGNED ( offset , 512 ) & & IS_ALIGNED ( size , 512 )
2017-07-01 03:32:52 +03:00
& & ! ( flags & NVDIMM_IO_ATOMIC ) ) {
2016-12-04 21:45:13 +03:00
long cleared ;
2017-08-31 04:35:58 +03:00
might_sleep ( ) ;
2017-04-26 00:16:51 +03:00
cleared = nvdimm_clear_poison ( & ndns - > dev ,
nsio - > res . start + offset , size ) ;
2016-12-16 19:10:31 +03:00
if ( cleared < size )
2016-11-11 22:37:36 +03:00
rc = - EIO ;
2016-12-16 19:10:31 +03:00
if ( cleared > 0 & & cleared / 512 ) {
cleared / = 512 ;
badblocks_clear ( & nsio - > bb , sector , cleared ) ;
2016-12-04 21:45:13 +03:00
}
2017-05-30 09:00:34 +03:00
arch_invalidate_pmem ( nsio - > addr + offset , size ) ;
2016-12-04 21:45:13 +03:00
} else
rc = - EIO ;
2016-03-22 10:22:16 +03:00
}
2017-05-29 22:22:50 +03:00
memcpy_flushcache ( nsio - > addr + offset , buf , size ) ;
2019-07-05 17:03:22 +03:00
ret = nvdimm_flush ( to_nd_region ( ndns - > dev . parent ) , NULL ) ;
if ( ret )
rc = ret ;
2016-12-04 21:45:13 +03:00
2016-11-11 22:37:36 +03:00
return rc ;
2016-03-22 10:22:16 +03:00
}
2019-10-31 13:57:41 +03:00
int devm_nsio_enable ( struct device * dev , struct nd_namespace_io * nsio ,
resource_size_t size )
2016-03-22 10:22:16 +03:00
{
struct nd_namespace_common * ndns = & nsio - > common ;
2020-10-14 02:50:29 +03:00
struct range range = {
. start = nsio - > res . start ,
. end = nsio - > res . end ,
} ;
2016-03-22 10:22:16 +03:00
2019-10-31 13:57:41 +03:00
nsio - > size = size ;
2020-10-14 02:50:29 +03:00
if ( ! devm_request_mem_region ( dev , range . start , size ,
libnvdimm: use consistent naming for request_mem_region()
Here is an example /proc/iomem listing for a system with 2 namespaces,
one in "sector" mode and one in "memory" mode:
1fc000000-2fbffffff : Persistent Memory (legacy)
1fc000000-2fbffffff : namespace1.0
340000000-34fffffff : Persistent Memory
340000000-34fffffff : btt0.1
Here is the corresponding ndctl listing:
# ndctl list
[
{
"dev":"namespace1.0",
"mode":"memory",
"size":4294967296,
"blockdev":"pmem1"
},
{
"dev":"namespace0.0",
"mode":"sector",
"size":267091968,
"uuid":"f7594f86-badb-4592-875f-ded577da2eaf",
"sector_size":4096,
"blockdev":"pmem0s"
}
]
Notice that the ndctl listing is purely in terms of namespace devices,
while the iomem listing leaks the internal "btt0.1" implementation
detail. Given that ndctl requires the namespace device name to change
the mode, for example:
# ndctl create-namespace --reconfig=namespace0.0 --mode=raw --force
...use the namespace name in the iomem listing to keep the claiming
device name consistent across different mode settings.
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2016-11-28 22:15:18 +03:00
dev_name ( & ndns - > dev ) ) ) {
2020-10-14 02:50:29 +03:00
dev_warn ( dev , " could not reserve region %pR \n " , & nsio - > res ) ;
2016-03-22 10:22:16 +03:00
return - EBUSY ;
}
ndns - > rw_bytes = nsio_rw_bytes ;
if ( devm_init_badblocks ( dev , & nsio - > bb ) )
return - ENOMEM ;
nvdimm_badblocks_populate ( to_nd_region ( ndns - > dev . parent ) , & nsio - > bb ,
2020-10-14 02:50:29 +03:00
& range ) ;
2016-03-22 10:22:16 +03:00
2020-10-14 02:50:29 +03:00
nsio - > addr = devm_memremap ( dev , range . start , size , ARCH_MEMREMAP_PMEM ) ;
2016-05-27 23:28:31 +03:00
return PTR_ERR_OR_ZERO ( nsio - > addr ) ;
2016-03-22 10:22:16 +03:00
}
void devm_nsio_disable ( struct device * dev , struct nd_namespace_io * nsio )
{
struct resource * res = & nsio - > res ;
devm_memunmap ( dev , nsio - > addr ) ;
devm_exit_badblocks ( dev , & nsio - > bb ) ;
2019-10-31 13:57:41 +03:00
devm_release_mem_region ( dev , res - > start , nsio - > size ) ;
2016-03-22 10:22:16 +03:00
}