// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, Intel Corporation.
*/
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <pmem.h>
#include <nd.h>

long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	/* is_bad_pmem() expects its range in 512-byte sector units */
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/*
	 * Limit dax to a single page at a time given vmalloc()-backed
	 * ranges in the nfit_test case: vmalloc() pages are not
	 * guaranteed to be physically contiguous, so only one page can
	 * be handed out per call.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		if (kaddr)
			*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));

		return 1;
}
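
	/*
	 * Not an nfit_test-reserved range: the mapping is linear, so the
	 * kernel address and pfn derive directly from the physical offset.
	 */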
	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
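
/*
 * Minimal usage sketch (not part of the original file, hence #if 0):
 * in the real pmem driver this helper sits behind the dax_operations
 * ->direct_access callback. The wrapper name below is hypothetical and
 * only illustrates the calling convention; it assumes the pmem_device
 * was stashed as the dax_device private data, retrievable via
 * dax_get_private().
 */
#if 0
static long example_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	/* returns the number of contiguous good pages, or -EIO */
	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}
#endif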