// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>
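
/*
 * Unused unpopulated pages are kept on a simple singly-linked free list,
 * chained through page->zone_device_data.  list_lock protects the list and
 * list_count, which tracks how many pages are currently available.
 */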
static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;
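
/*
 * Grow the free list by at least @nr_pages pages.  The allocation is rounded
 * up to a whole section so the new dev_pagemap covers complete memory
 * sections.  Called with list_lock held.
 */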
static int fill_list(unsigned int nr_pages)
{
        struct dev_pagemap *pgmap;
        struct resource *res;
        void *vaddr;
        unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
        int ret = -ENOMEM;

        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
                goto err_pgmap;

        pgmap->type = MEMORY_DEVICE_GENERIC;
        res->name = "Xen scratch";
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

        /* Reserve an unused physical address range for the new pages. */
        ret = allocate_resource(&iomem_resource, res,
                                alloc_pages * PAGE_SIZE, 0, -1,
                                PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
        if (ret < 0) {
                pr_err("Cannot allocate new IOMEM resource\n");
                goto err_resource;
        }

        pgmap->range = (struct range) {
                .start = res->start,
                .end = res->end,
        };
        pgmap->nr_range = 1;
        pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
        /*
         * memremap will build page tables for the new memory so
         * the p2m must contain invalid entries so the correct
         * non-present PTEs will be written.
         *
         * If a failure occurs, the original (identity) p2m entries
         * are not restored since this region is now known not to
         * conflict with any devices.
         */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                xen_pfn_t pfn = PFN_DOWN(res->start);

                for (i = 0; i < alloc_pages; i++) {
                        if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
                                pr_warn("set_phys_to_machine() failed, no memory added\n");
                                ret = -ENOMEM;
                                goto err_memremap;
                        }
                }
        }
#endif

        /* Create ZONE_DEVICE struct pages for the reserved range. */
        vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
        if (IS_ERR(vaddr)) {
                pr_err("Cannot remap memory range\n");
                ret = PTR_ERR(vaddr);
                goto err_memremap;
        }

        /* Push the new pages onto the free list. */
        for (i = 0; i < alloc_pages; i++) {
                struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

                BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
                pg->zone_device_data = page_list;
                page_list = pg;
                list_count++;
        }

        return 0;

err_memremap:
        release_resource(res);
err_resource:
        kfree(pgmap);
err_pgmap:
        kfree(res);
        return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 *
 * Return: 0 on success, error otherwise.
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int i;
        int ret = 0;

        mutex_lock(&list_lock);
        if (list_count < nr_pages) {
                ret = fill_list(nr_pages - list_count);
                if (ret)
                        goto out;
        }

        for (i = 0; i < nr_pages; i++) {
                struct page *pg = page_list;

                BUG_ON(!pg);
                page_list = pg->zone_device_data;
                list_count--;
                pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        ret = xen_alloc_p2m_entry(page_to_pfn(pg));
                        if (ret < 0) {
                                unsigned int j;

                                /* Roll back: return the pages taken so far. */
                                for (j = 0; j <= i; j++) {
                                        pages[j]->zone_device_data = page_list;
                                        page_list = pages[j];
                                        list_count++;
                                }
                                goto out;
                        }
                }
#endif
        }

out:
        mutex_unlock(&list_lock);
        return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int i;

        mutex_lock(&list_lock);
        for (i = 0; i < nr_pages; i++) {
                pages[i]->zone_device_data = page_list;
                page_list = pages[i];
                list_count++;
        }
        mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
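
/*
 * Illustrative usage (a sketch, not code from this file): a hypothetical
 * caller that needs a batch of unpopulated pages, e.g. as targets for
 * foreign grant mappings, might pair the two helpers like this:
 *
 *      struct page *pages[16];
 *      int rc;
 *
 *      rc = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *      if (rc)
 *              return rc;
 *      ... map foreign memory into the pages ...
 *      xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
 */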

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
        unsigned int i;

        if (!xen_domain())
                return -ENODEV;

        if (!xen_pv_domain())
                return 0;

        /*
         * Initialize with pages from the extra memory regions (see
         * arch/x86/xen/setup.c).
         */
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                unsigned int j;

                for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
                        struct page *pg =
                                pfn_to_page(xen_extra_mem[i].start_pfn + j);

                        pg->zone_device_data = page_list;
                        page_list = pg;
                        list_count++;
                }
        }

        return 0;
}
subsys_initcall(init);
#endif