/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
# include <linux/kernel.h>
# include <linux/errno.h>
# include <linux/types.h>
# include <linux/mm.h>
# include <linux/init.h>
# define ESSA_SET_STABLE 1
# define ESSA_SET_UNUSED 2
/*
 * Collaborative memory management (guest page hinting) is enabled by
 * default; the "cmma=" kernel parameter below can switch it off, and
 * cmma_init() clears it when the machine lacks the required instruction.
 */
static int cmma_flag = 1;
2008-05-07 09:22:59 +02:00
static int __init cmma ( char * str )
{
char * parm ;
2009-09-11 10:29:01 +02:00
2008-05-07 09:22:59 +02:00
parm = strstrip ( str ) ;
if ( strcmp ( parm , " yes " ) = = 0 | | strcmp ( parm , " on " ) = = 0 ) {
cmma_flag = 1 ;
return 1 ;
}
cmma_flag = 0 ;
if ( strcmp ( parm , " no " ) = = 0 | | strcmp ( parm , " off " ) = = 0 )
return 1 ;
return 0 ;
}
__setup ( " cmma= " , cmma ) ;
void __init cmma_init ( void )
{
register unsigned long tmp asm ( " 0 " ) = 0 ;
register int rc asm ( " 1 " ) = - EOPNOTSUPP ;
if ( ! cmma_flag )
return ;
asm volatile (
" .insn rrf,0xb9ab0000,%1,%1,0,0 \n "
" 0: la %0,0 \n "
" 1: \n "
EX_TABLE ( 0 b , 1 b )
: " +&d " ( rc ) , " +&d " ( tmp ) ) ;
if ( rc )
cmma_flag = 0 ;
}
2009-09-22 22:58:44 +02:00
static inline void set_page_unstable ( struct page * page , int order )
2008-05-07 09:22:59 +02:00
{
int i , rc ;
for ( i = 0 ; i < ( 1 < < order ) ; i + + )
asm volatile ( " .insn rrf,0xb9ab0000,%0,%1,%2,0 "
: " =&d " ( rc )
2009-09-22 22:58:44 +02:00
: " a " ( page_to_phys ( page + i ) ) ,
2008-05-07 09:22:59 +02:00
" i " ( ESSA_SET_UNUSED ) ) ;
}
2009-09-22 22:58:44 +02:00
void arch_free_page ( struct page * page , int order )
2008-05-07 09:22:59 +02:00
{
if ( ! cmma_flag )
return ;
2009-09-22 22:58:44 +02:00
set_page_unstable ( page , order ) ;
}
static inline void set_page_stable ( struct page * page , int order )
{
int i , rc ;
2008-05-07 09:22:59 +02:00
for ( i = 0 ; i < ( 1 < < order ) ; i + + )
asm volatile ( " .insn rrf,0xb9ab0000,%0,%1,%2,0 "
: " =&d " ( rc )
2009-09-22 22:58:44 +02:00
: " a " ( page_to_phys ( page + i ) ) ,
2008-05-07 09:22:59 +02:00
" i " ( ESSA_SET_STABLE ) ) ;
}
2009-09-22 22:58:44 +02:00
void arch_alloc_page ( struct page * page , int order )
{
if ( ! cmma_flag )
return ;
set_page_stable ( page , order ) ;
}
/*
 * Walk every free list of every populated zone and set all free pages
 * either stable (make_stable != 0) or unused (make_stable == 0), e.g.
 * around suspend/resume. Per-CPU pages are drained first in the stable
 * case so they land on the free lists and get covered by the walk.
 */
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		/* The zone lock keeps the free lists stable during the walk. */
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}