/*
 * Copyright (c) 2014-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <pmem.h>
#include <nd.h>
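/*
 * nfit_test variant of the pmem driver's direct_access helper. Ranges
 * claimed by nfit_test are vmalloc()-backed, so they are translated one
 * page at a time via vmalloc_to_page(); all other ranges fall through to
 * the normal linear phys_to_pfn_t() translation.
 */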
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
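	/* Fail the whole request if any part of it overlaps a known bad block */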
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
	/*
	 * Limit dax to a single page at a time given the vmalloc()-backed
	 * memory in the nfit_test case.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));
		return 1;
	}
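	/* Not an nfit_test resource: regular pmem with a linear physical mapping */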
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}