/*
 * MMU operations common to all auto-translated physmap guests.
 *
 * Copyright (C) 2015 Citrix Systems R&D Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
# include <linux/kernel.h>
# include <linux/mm.h>
2016-04-07 15:03:19 +03:00
# include <linux/slab.h>
# include <linux/vmalloc.h>
2015-03-11 17:49:56 +03:00
# include <asm/xen/hypercall.h>
# include <asm/xen/hypervisor.h>
# include <xen/xen.h>
2018-11-27 17:23:27 +03:00
# include <xen/xen-ops.h>
2015-03-11 17:49:56 +03:00
# include <xen/page.h>
# include <xen/interface/xen.h>
# include <xen/interface/memory.h>
2016-04-07 15:03:19 +03:00
# include <xen/balloon.h>
2015-03-11 17:49:56 +03:00
2015-05-05 18:54:12 +03:00
typedef void ( * xen_gfn_fn_t ) ( unsigned long gfn , void * data ) ;
2015-03-11 17:49:56 +03:00
2015-05-05 18:54:12 +03:00
/* Break down the pages in 4KB chunk and call fn for each gfn */
static void xen_for_each_gfn ( struct page * * pages , unsigned nr_gfn ,
xen_gfn_fn_t fn , void * data )
{
unsigned long xen_pfn = 0 ;
struct page * page ;
int i ;
2015-03-11 17:49:56 +03:00
2015-05-05 18:54:12 +03:00
for ( i = 0 ; i < nr_gfn ; i + + ) {
if ( ( i % XEN_PFN_PER_PAGE ) = = 0 ) {
page = pages [ i / XEN_PFN_PER_PAGE ] ;
xen_pfn = page_to_xen_pfn ( page ) ;
}
fn ( pfn_to_gfn ( xen_pfn + + ) , data ) ;
}
2015-03-11 17:49:56 +03:00
}
struct remap_data {
2015-08-07 19:34:41 +03:00
xen_pfn_t * fgfn ; /* foreign domain's gfn */
2015-05-05 18:54:12 +03:00
int nr_fgfn ; /* Number of foreign gfn left to map */
2015-03-11 17:49:56 +03:00
pgprot_t prot ;
domid_t domid ;
struct vm_area_struct * vma ;
int index ;
struct page * * pages ;
2015-08-07 19:34:41 +03:00
struct xen_remap_gfn_info * info ;
2015-03-11 17:49:57 +03:00
int * err_ptr ;
int mapped ;
2015-05-05 18:54:12 +03:00
/* Hypercall parameters */
int h_errs [ XEN_PFN_PER_PAGE ] ;
xen_ulong_t h_idxs [ XEN_PFN_PER_PAGE ] ;
xen_pfn_t h_gpfns [ XEN_PFN_PER_PAGE ] ;
int h_iter ; /* Iterator */
2015-03-11 17:49:56 +03:00
} ;
2015-05-05 18:54:12 +03:00
static void setup_hparams ( unsigned long gfn , void * data )
{
struct remap_data * info = data ;
info - > h_idxs [ info - > h_iter ] = * info - > fgfn ;
info - > h_gpfns [ info - > h_iter ] = gfn ;
info - > h_errs [ info - > h_iter ] = 0 ;
info - > h_iter + + ;
info - > fgfn + + ;
}
2019-07-12 06:58:43 +03:00
static int remap_pte_fn ( pte_t * ptep , unsigned long addr , void * data )
2015-03-11 17:49:56 +03:00
{
struct remap_data * info = data ;
struct page * page = info - > pages [ info - > index + + ] ;
2015-05-05 18:54:12 +03:00
pte_t pte = pte_mkspecial ( pfn_pte ( page_to_pfn ( page ) , info - > prot ) ) ;
int rc , nr_gfn ;
uint32_t i ;
struct xen_add_to_physmap_range xatp = {
. domid = DOMID_SELF ,
. foreign_domid = info - > domid ,
. space = XENMAPSPACE_gmfn_foreign ,
} ;
2015-03-11 17:49:56 +03:00
2015-05-05 18:54:12 +03:00
nr_gfn = min_t ( typeof ( info - > nr_fgfn ) , XEN_PFN_PER_PAGE , info - > nr_fgfn ) ;
info - > nr_fgfn - = nr_gfn ;
info - > h_iter = 0 ;
xen_for_each_gfn ( & page , nr_gfn , setup_hparams , info ) ;
BUG_ON ( info - > h_iter ! = nr_gfn ) ;
set_xen_guest_handle ( xatp . idxs , info - > h_idxs ) ;
set_xen_guest_handle ( xatp . gpfns , info - > h_gpfns ) ;
set_xen_guest_handle ( xatp . errs , info - > h_errs ) ;
xatp . size = nr_gfn ;
rc = HYPERVISOR_memory_op ( XENMEM_add_to_physmap_range , & xatp ) ;
/* info->err_ptr expect to have one error status per Xen PFN */
for ( i = 0 ; i < nr_gfn ; i + + ) {
int err = ( rc < 0 ) ? rc : info - > h_errs [ i ] ;
* ( info - > err_ptr + + ) = err ;
if ( ! err )
info - > mapped + + ;
2015-03-11 17:49:57 +03:00
}
2015-05-05 18:54:12 +03:00
/*
* Note : The hypercall will return 0 in most of the case if even if
* all the fgmfn are not mapped . We still have to update the pte
* as the userspace may decide to continue .
*/
if ( ! rc )
set_pte_at ( info - > vma - > vm_mm , addr , ptep , pte ) ;
2015-03-11 17:49:56 +03:00
return 0 ;
}
2015-03-11 17:49:57 +03:00
int xen_xlate_remap_gfn_array ( struct vm_area_struct * vma ,
2015-03-11 17:49:56 +03:00
unsigned long addr ,
2015-08-07 19:34:41 +03:00
xen_pfn_t * gfn , int nr ,
2015-03-11 17:49:57 +03:00
int * err_ptr , pgprot_t prot ,
unsigned domid ,
2015-03-11 17:49:56 +03:00
struct page * * pages )
{
int err ;
struct remap_data data ;
2015-05-05 18:54:12 +03:00
unsigned long range = DIV_ROUND_UP ( nr , XEN_PFN_PER_PAGE ) < < PAGE_SHIFT ;
2015-03-11 17:49:56 +03:00
2015-03-11 17:49:57 +03:00
/* Kept here for the purpose of making sure code doesn't break
x86 PVOPS */
BUG_ON ( ! ( ( vma - > vm_flags & ( VM_PFNMAP | VM_IO ) ) = = ( VM_PFNMAP | VM_IO ) ) ) ;
2015-03-11 17:49:56 +03:00
2015-08-07 19:34:41 +03:00
data . fgfn = gfn ;
2015-05-05 18:54:12 +03:00
data . nr_fgfn = nr ;
2015-03-11 17:49:57 +03:00
data . prot = prot ;
2015-03-11 17:49:56 +03:00
data . domid = domid ;
2015-03-11 17:49:57 +03:00
data . vma = vma ;
2015-03-11 17:49:56 +03:00
data . pages = pages ;
2015-03-11 17:49:57 +03:00
data . index = 0 ;
data . err_ptr = err_ptr ;
data . mapped = 0 ;
err = apply_to_page_range ( vma - > vm_mm , addr , range ,
2015-03-11 17:49:56 +03:00
remap_pte_fn , & data ) ;
2015-03-11 17:49:57 +03:00
return err < 0 ? err : data . mapped ;
2015-03-11 17:49:56 +03:00
}
2015-03-11 17:49:57 +03:00
EXPORT_SYMBOL_GPL ( xen_xlate_remap_gfn_array ) ;
2015-03-11 17:49:56 +03:00
2015-05-05 18:54:12 +03:00
static void unmap_gfn ( unsigned long gfn , void * data )
2015-03-11 17:49:56 +03:00
{
2015-05-05 18:54:12 +03:00
struct xen_remove_from_physmap xrp ;
2015-03-11 17:49:56 +03:00
2015-05-05 18:54:12 +03:00
xrp . domid = DOMID_SELF ;
xrp . gpfn = gfn ;
( void ) HYPERVISOR_memory_op ( XENMEM_remove_from_physmap , & xrp ) ;
}
2015-03-11 17:49:56 +03:00
2015-05-05 18:54:12 +03:00
int xen_xlate_unmap_gfn_range ( struct vm_area_struct * vma ,
int nr , struct page * * pages )
{
xen_for_each_gfn ( pages , nr , unmap_gfn , NULL ) ;
2015-03-11 17:49:56 +03:00
return 0 ;
}
EXPORT_SYMBOL_GPL ( xen_xlate_unmap_gfn_range ) ;
2016-04-07 15:03:19 +03:00
2016-04-07 15:03:20 +03:00
struct map_balloon_pages {
xen_pfn_t * pfns ;
unsigned int idx ;
} ;
static void setup_balloon_gfn ( unsigned long gfn , void * data )
{
struct map_balloon_pages * info = data ;
info - > pfns [ info - > idx + + ] = gfn ;
}
2016-04-07 15:03:19 +03:00
/**
* xen_xlate_map_ballooned_pages - map a new set of ballooned pages
* @ gfns : returns the array of corresponding GFNs
* @ virt : returns the virtual address of the mapped region
* @ nr_grant_frames : number of GFNs
* @ return 0 on success , error otherwise
*
* This allocates a set of ballooned pages and maps them into the
* kernel ' s address space .
*/
int __init xen_xlate_map_ballooned_pages ( xen_pfn_t * * gfns , void * * virt ,
unsigned long nr_grant_frames )
{
struct page * * pages ;
xen_pfn_t * pfns ;
void * vaddr ;
2016-04-07 15:03:20 +03:00
struct map_balloon_pages data ;
2016-04-07 15:03:19 +03:00
int rc ;
2016-04-07 15:03:20 +03:00
unsigned long nr_pages ;
2016-04-07 15:03:19 +03:00
BUG_ON ( nr_grant_frames = = 0 ) ;
2016-04-07 15:03:20 +03:00
nr_pages = DIV_ROUND_UP ( nr_grant_frames , XEN_PFN_PER_PAGE ) ;
pages = kcalloc ( nr_pages , sizeof ( pages [ 0 ] ) , GFP_KERNEL ) ;
2016-04-07 15:03:19 +03:00
if ( ! pages )
return - ENOMEM ;
pfns = kcalloc ( nr_grant_frames , sizeof ( pfns [ 0 ] ) , GFP_KERNEL ) ;
if ( ! pfns ) {
kfree ( pages ) ;
return - ENOMEM ;
}
2016-04-07 15:03:20 +03:00
rc = alloc_xenballooned_pages ( nr_pages , pages ) ;
2016-04-07 15:03:19 +03:00
if ( rc ) {
2016-04-07 15:03:20 +03:00
pr_warn ( " %s Couldn't balloon alloc %ld pages rc:%d \n " , __func__ ,
nr_pages , rc ) ;
2016-04-07 15:03:19 +03:00
kfree ( pages ) ;
kfree ( pfns ) ;
return rc ;
}
2016-04-07 15:03:20 +03:00
data . pfns = pfns ;
data . idx = 0 ;
xen_for_each_gfn ( pages , nr_grant_frames , setup_balloon_gfn , & data ) ;
vaddr = vmap ( pages , nr_pages , 0 , PAGE_KERNEL ) ;
2016-04-07 15:03:19 +03:00
if ( ! vaddr ) {
2016-04-07 15:03:20 +03:00
pr_warn ( " %s Couldn't map %ld pages rc:%d \n " , __func__ ,
nr_pages , rc ) ;
free_xenballooned_pages ( nr_pages , pages ) ;
2016-04-07 15:03:19 +03:00
kfree ( pages ) ;
kfree ( pfns ) ;
return - ENOMEM ;
}
kfree ( pages ) ;
* gfns = pfns ;
* virt = vaddr ;
return 0 ;
}
EXPORT_SYMBOL_GPL ( xen_xlate_map_ballooned_pages ) ;