/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace.h"

#define PTE_SIZE 12
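
/* Drop the host HPTE that backs a cached guest translation. */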
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
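
/*
 * Look up the guest->host VSID mapping for a guest VSID.  Both the hashed
 * slot and its mirror (SID_MAP_MASK - hash) are checked, matching the two
 * slots that create_sid_map() alternates between.  Returns NULL on a miss.
 */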
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}
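
/*
 * Map a guest page into the host hash page table: resolve the guest real
 * address to a host pfn, find (or create) the shadow segment for the
 * effective address, insert a 4K HPTE and remember it in the hpte cache
 * so it can be invalidated later.
 */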
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	ulong hash, hpteg, va;
	u64 vsid;
	int ret;
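	/* 0x192 = HPTE_R_R | HPTE_R_C | HPTE_R_M | PP=0x2: referenced,
	   changed, coherent, read/write; downgraded to read-only below
	   when the guest PTE is not writable. */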
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
		r = -EINVAL;
		goto out;
	}
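	/* Turn the pfn into a physical address; keep the 4K sub-page offset
	   within the host page in case the host uses pages larger than 4K. */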
	hpaddr <<= PAGE_SHIFT;
	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
				vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vsid = map->host_vsid;
	va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

	if (!orig_pte->may_write)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
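
	/* Insert into the primary hash group first; if that is full, retry
	   with the secondary group, and once both have been tried start
	   evicting existing entries. */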
map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out;
		}

	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);

		trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte);

		/* The ppc_md code may give us a secondary entry even though we
		   asked for a primary. Fix up. */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		pte->slot = hpteg + (ret & 7);
		pte->host_va = va;
		pte->pte = *orig_pte;
		pte->pfn = hpaddr >> PAGE_SHIFT;

		kvmppc_mmu_hpte_cache_map(vcpu, pte);
	}

out:
	return r;
}
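
/*
 * Allocate a new guest->host VSID mapping for a guest VSID that has no
 * entry yet.  Alternates between the hashed slot and its mirror on each
 * call; when the proto-VSID space is exhausted, all shadow state is
 * flushed and the range is reused.
 */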
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
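	/* Hand out the next proto-VSID from this vcpu's range and scramble it
	   into the host VSID for a 256M segment. */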
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}
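
/*
 * Pick a shadow SLB slot for a guest segment.  Reuses the slot if the ESID
 * is already present, prefers a previously invalidated slot, and otherwise
 * grows slb_max; slot 0 is never handed out for guest mappings.
 */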
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	if (!svcpu->slb_max)
		svcpu->slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}
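
/*
 * Build and install a shadow SLB entry for the guest effective address:
 * translate the guest ESID to a guest VSID, map that to a host VSID and
 * write the resulting SLBE into the shadow SLB.
 */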
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
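
	/* Host VSID goes into the SLBE VSID field (bit 12 up), Kp is cleared
	   and the slot index is folded into the ESID word. */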
	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}
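
/* Drop all guest segment mappings from the shadow SLB. */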
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->slb_max = 1;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}
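
/* Tear down the hpte cache and release the host MMU context for this vcpu. */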
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}
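
/*
 * Allocate a host MMU context for this vcpu and derive the proto-VSID
 * range used for guest segment mappings from it.
 */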
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;
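
	/* The context id selects a block of proto-VSIDs (USER_ESID_BITS wide);
	   create_sid_map() hands them out sequentially. */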
	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
				  << USER_ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}