/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
/*
 * Translate a guest real address into a host user-space address,
 * applying the vcpu's prefix swap: the first two guest pages and the
 * prefix area exchange places.  On translation failure the returned
 * pointer encodes -EFAULT (check with IS_ERR()).
 */
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long hva;

	/* low-core pages and the prefix area are swapped */
	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if (guestaddr >= prefix && guestaddr < prefix + 2 * PAGE_SIZE)
		guestaddr -= prefix;

	hva = gmap_fault(guestaddr, vcpu->arch.gmap);
	if (IS_ERR_VALUE(hva))
		hva = -EFAULT;

	return (void __user *) hva;
}
2008-07-25 15:51:00 +02:00
static inline int get_guest_u64 ( struct kvm_vcpu * vcpu , unsigned long guestaddr ,
2008-03-25 18:47:20 +01:00
u64 * result )
{
void __user * uptr = __guestaddr_to_user ( vcpu , guestaddr ) ;
BUG_ON ( guestaddr & 7 ) ;
if ( IS_ERR ( ( void __force * ) uptr ) )
return PTR_ERR ( ( void __force * ) uptr ) ;
2008-07-25 15:51:00 +02:00
return get_user ( * result , ( unsigned long __user * ) uptr ) ;
2008-03-25 18:47:20 +01:00
}
2008-07-25 15:51:00 +02:00
static inline int get_guest_u32 ( struct kvm_vcpu * vcpu , unsigned long guestaddr ,
2008-03-25 18:47:20 +01:00
u32 * result )
{
void __user * uptr = __guestaddr_to_user ( vcpu , guestaddr ) ;
BUG_ON ( guestaddr & 3 ) ;
if ( IS_ERR ( ( void __force * ) uptr ) )
return PTR_ERR ( ( void __force * ) uptr ) ;
return get_user ( * result , ( u32 __user * ) uptr ) ;
}
2008-07-25 15:51:00 +02:00
static inline int get_guest_u16 ( struct kvm_vcpu * vcpu , unsigned long guestaddr ,
2008-03-25 18:47:20 +01:00
u16 * result )
{
void __user * uptr = __guestaddr_to_user ( vcpu , guestaddr ) ;
BUG_ON ( guestaddr & 1 ) ;
if ( IS_ERR ( uptr ) )
return PTR_ERR ( uptr ) ;
return get_user ( * result , ( u16 __user * ) uptr ) ;
}
2008-07-25 15:51:00 +02:00
static inline int get_guest_u8 ( struct kvm_vcpu * vcpu , unsigned long guestaddr ,
2008-03-25 18:47:20 +01:00
u8 * result )
{
void __user * uptr = __guestaddr_to_user ( vcpu , guestaddr ) ;
if ( IS_ERR ( ( void __force * ) uptr ) )
return PTR_ERR ( ( void __force * ) uptr ) ;
return get_user ( * result , ( u8 __user * ) uptr ) ;
}
2008-07-25 15:51:00 +02:00
static inline int put_guest_u64 ( struct kvm_vcpu * vcpu , unsigned long guestaddr ,
2008-03-25 18:47:20 +01:00
u64 value )
{
void __user * uptr = __guestaddr_to_user ( vcpu , guestaddr ) ;
BUG_ON ( guestaddr & 7 ) ;
if ( IS_ERR ( ( void __force * ) uptr ) )
return PTR_ERR ( ( void __force * ) uptr ) ;
return put_user ( value , ( u64 __user * ) uptr ) ;
}
2008-07-25 15:51:00 +02:00
static inline int put_guest_u32 ( struct kvm_vcpu * vcpu , unsigned long guestaddr ,
2008-03-25 18:47:20 +01:00
u32 value )
{
void __user * uptr = __guestaddr_to_user ( vcpu , guestaddr ) ;
BUG_ON ( guestaddr & 3 ) ;
if ( IS_ERR ( ( void __force * ) uptr ) )
return PTR_ERR ( ( void __force * ) uptr ) ;
return put_user ( value , ( u32 __user * ) uptr ) ;
}
2008-07-25 15:51:00 +02:00
static inline int put_guest_u16 ( struct kvm_vcpu * vcpu , unsigned long guestaddr ,
2008-03-25 18:47:20 +01:00
u16 value )
{
void __user * uptr = __guestaddr_to_user ( vcpu , guestaddr ) ;
BUG_ON ( guestaddr & 1 ) ;
if ( IS_ERR ( ( void __force * ) uptr ) )
return PTR_ERR ( ( void __force * ) uptr ) ;
return put_user ( value , ( u16 __user * ) uptr ) ;
}
2008-07-25 15:51:00 +02:00
static inline int put_guest_u8 ( struct kvm_vcpu * vcpu , unsigned long guestaddr ,
2008-03-25 18:47:20 +01:00
u8 value )
{
void __user * uptr = __guestaddr_to_user ( vcpu , guestaddr ) ;
if ( IS_ERR ( ( void __force * ) uptr ) )
return PTR_ERR ( ( void __force * ) uptr ) ;
return put_user ( value , ( u8 __user * ) uptr ) ;
}
2008-07-25 15:51:00 +02:00
static inline int __copy_to_guest_slow ( struct kvm_vcpu * vcpu ,
unsigned long guestdest ,
2011-07-24 10:48:22 +02:00
void * from , unsigned long n )
2008-03-25 18:47:20 +01:00
{
int rc ;
unsigned long i ;
2011-07-24 10:48:22 +02:00
u8 * data = from ;
2008-03-25 18:47:20 +01:00
for ( i = 0 ; i < n ; i + + ) {
rc = put_guest_u8 ( vcpu , guestdest + + , * ( data + + ) ) ;
if ( rc < 0 )
return rc ;
}
return 0 ;
}
/*
 * Copy n bytes from the host buffer "from" to guest real address
 * guestdest, translating the guest address segment by segment via
 * gmap_fault().  No prefix handling happens here; callers that need it
 * go through copy_to_guest().  Returns 0 on success or a negative
 * error code on translation or copy fault.
 */
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	/* reject wrap-around of the guest address range */
	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment: bytes up to the next segment boundary */
	uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestdest & ~PMD_MASK);

	r = copy_to_user(uptr, from, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments, re-translating at each segment boundary */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment; if n == 0, r still holds the result of
	 * the last full-segment copy above */
	if (n) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
/*
 * Copy to guest absolute addresses, i.e. without applying the vcpu's
 * prefix swap.
 */
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}
2008-07-25 15:51:00 +02:00
static inline int copy_to_guest ( struct kvm_vcpu * vcpu , unsigned long guestdest ,
2011-07-24 10:48:22 +02:00
void * from , unsigned long n )
2008-03-25 18:47:20 +01:00
{
2008-07-25 15:51:00 +02:00
unsigned long prefix = vcpu - > arch . sie_block - > prefix ;
2008-03-25 18:47:20 +01:00
if ( ( guestdest < 2 * PAGE_SIZE ) & & ( guestdest + n > 2 * PAGE_SIZE ) )
goto slowpath ;
if ( ( guestdest < prefix ) & & ( guestdest + n > prefix ) )
goto slowpath ;
if ( ( guestdest < prefix + 2 * PAGE_SIZE )
& & ( guestdest + n > prefix + 2 * PAGE_SIZE ) )
goto slowpath ;
if ( guestdest < 2 * PAGE_SIZE )
guestdest + = prefix ;
else if ( ( guestdest > = prefix ) & & ( guestdest < prefix + 2 * PAGE_SIZE ) )
guestdest - = prefix ;
2011-07-24 10:48:22 +02:00
return __copy_to_guest_fast ( vcpu , guestdest , from , n ) ;
2008-03-25 18:47:20 +01:00
slowpath :
return __copy_to_guest_slow ( vcpu , guestdest , from , n ) ;
}
static inline int __copy_from_guest_slow ( struct kvm_vcpu * vcpu , void * to ,
2008-07-25 15:51:00 +02:00
unsigned long guestsrc ,
unsigned long n )
2008-03-25 18:47:20 +01:00
{
int rc ;
unsigned long i ;
u8 * data = to ;
for ( i = 0 ; i < n ; i + + ) {
rc = get_guest_u8 ( vcpu , guestsrc + + , data + + ) ;
if ( rc < 0 )
return rc ;
}
return 0 ;
}
/*
 * Copy n bytes from guest real address guestsrc into the host buffer
 * "to", translating the guest address segment by segment via
 * gmap_fault().  No prefix handling happens here; callers that need it
 * go through copy_from_guest().  Returns 0 on success or a negative
 * error code on translation or copy fault.
 */
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	/* reject wrap-around of the guest address range */
	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment: bytes up to the next segment boundary */
	uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestsrc & ~PMD_MASK);

	r = copy_from_user(to, uptr, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments, re-translating at each segment boundary */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment; if n == 0, r still holds the result of
	 * the last full-segment copy above */
	if (n) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
/*
 * Copy from guest absolute addresses, i.e. without applying the vcpu's
 * prefix swap.
 */
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}
2008-03-25 18:47:20 +01:00
2011-07-24 10:48:22 +02:00
static inline int copy_from_guest ( struct kvm_vcpu * vcpu , void * to ,
unsigned long guestsrc , unsigned long n )
{
unsigned long prefix = vcpu - > arch . sie_block - > prefix ;
2008-03-25 18:47:20 +01:00
2011-07-24 10:48:22 +02:00
if ( ( guestsrc < 2 * PAGE_SIZE ) & & ( guestsrc + n > 2 * PAGE_SIZE ) )
goto slowpath ;
2008-03-25 18:47:20 +01:00
2011-07-24 10:48:22 +02:00
if ( ( guestsrc < prefix ) & & ( guestsrc + n > prefix ) )
goto slowpath ;
if ( ( guestsrc < prefix + 2 * PAGE_SIZE )
& & ( guestsrc + n > prefix + 2 * PAGE_SIZE ) )
goto slowpath ;
if ( guestsrc < 2 * PAGE_SIZE )
guestsrc + = prefix ;
else if ( ( guestsrc > = prefix ) & & ( guestsrc < prefix + 2 * PAGE_SIZE ) )
guestsrc - = prefix ;
2008-03-25 18:47:20 +01:00
2011-07-24 10:48:22 +02:00
return __copy_from_guest_fast ( vcpu , to , guestsrc , n ) ;
slowpath :
return __copy_from_guest_slow ( vcpu , to , guestsrc , n ) ;
2008-03-25 18:47:20 +01:00
}
#endif /* __KVM_S390_GACCESS_H */