// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/balloon.h>
#include <xen/page.h>
#include <xen/xen.h>

static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;

static struct resource *target_resource;

/*
 * If the arch is not happy with the system "iomem_resource" being used for
 * the region allocation, it can provide its own view by creating a specific
 * Xen resource with unused regions of guest physical address space provided
 * by the hypervisor.
 */
int __weak __init arch_xen_unpopulated_init(struct resource **res)
{
	*res = &iomem_resource;

	return 0;
}
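
/*
 * Illustrative sketch only (not taken from any real architecture): an arch
 * that does not want "iomem_resource" to back these allocations could
 * override the weak hook above with a resource describing unused guest
 * physical address space. The resource and the xen_query_unused_gpa_range()
 * helper below are hypothetical placeholders for however the arch learns
 * about such space from the hypervisor or firmware.
 *
 *	static struct resource xen_unpopulated_space = {
 *		.name  = "Xen unpopulated space",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	int __init arch_xen_unpopulated_init(struct resource **res)
 *	{
 *		int rc;
 *
 *		rc = xen_query_unused_gpa_range(&xen_unpopulated_space.start,
 *						&xen_unpopulated_space.end);
 *		if (rc)
 *			return rc;
 *
 *		*res = &xen_unpopulated_space;
 *		return 0;
 *	}
 */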

static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res, *tmp_res = NULL;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	struct range mhp_range;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	mhp_range = mhp_get_pluggable_range(true);

	ret = allocate_resource(target_resource, res,
				alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	/*
	 * Reserve the region previously allocated from the Xen resource to
	 * avoid it being re-used by someone else.
	 */
	if (target_resource != &iomem_resource) {
		tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
		if (!tmp_res) {
			ret = -ENOMEM;
			goto err_insert;
		}

		tmp_res->name = res->name;
		tmp_res->start = res->start;
		tmp_res->end = res->end;
		tmp_res->flags = res->flags;

		ret = request_resource(&iomem_resource, tmp_res);
		if (ret < 0) {
			pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
			kfree(tmp_res);
			goto err_insert;
		}
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory, so
	 * the p2m must contain invalid entries so that the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	if (tmp_res) {
		release_resource(tmp_res);
		kfree(tmp_res);
	}
err_insert:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	/*
	 * Fall back to default behavior if we do not have any suitable
	 * resource to allocate the required region from and, as a result,
	 * won't be able to construct pages.
	 */
	if (!target_resource)
		return xen_alloc_ballooned_pages(nr_pages, pages);

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	if (!target_resource) {
		xen_free_ballooned_pages(nr_pages, pages);
		return;
	}

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
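
/*
 * Usage sketch only (illustrative, not part of this file): a caller that
 * needs backing struct pages for mapping foreign frames could use the two
 * exported helpers roughly as follows. Error handling is minimal and the
 * surrounding driver context is assumed.
 *
 *	struct page *pages[16];
 *	int rc;
 *
 *	rc = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *	if (rc)
 *		return rc;
 *
 *	... map grant references / foreign frames into these pages ...
 *
 *	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
 */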

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			pg->zone_device_data = page_list;
			page_list = pg;
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif

static int __init unpopulated_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = arch_xen_unpopulated_init(&target_resource);
	if (ret) {
		pr_err("xen:unpopulated: Cannot initialize target resource\n");
		target_resource = NULL;
	}

	return ret;
}
early_initcall(unpopulated_init);