// SPDX-License-Identifier: GPL-2.0
/*
 * Common Primitives for Data Access Monitoring
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
# include "ops-common.h"

/*
 * Get an online page for a pfn if it's in the LRU list.  Otherwise, returns
 * NULL.
 *
 * The body of this function is stolen from 'page_idle_get_page()'.  We steal
 * rather than reuse it because the code is quite simple.
 */
struct page *damon_get_page(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);

	if (!page || !PageLRU(page) || !get_page_unless_zero(page))
		return NULL;

	/* The PageLRU() check above was racy; recheck with the reference held */
	if (unlikely(!PageLRU(page))) {
		put_page(page);
		page = NULL;
	}
	return page;
}
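
/*
 * Clear the accessed ("young") bit of the PTE at @addr in @mm so that later
 * checks can detect new accesses.  If the PTE or a secondary MMU (consulted
 * via the mmu_notifier) reports the page as young, that is recorded with
 * set_page_young() before the page is marked idle again.
 */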
void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
{
	bool referenced = false;
	struct page *page = damon_get_page(pte_pfn(*pte));

	if (!page)
		return;

	if (pte_young(*pte)) {
		referenced = true;
		*pte = pte_mkold(*pte);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}
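
/*
 * PMD version of damon_ptep_mkold(), for transparent huge pages: clears the
 * young bit of *pmd and updates the mapped page's idle-tracking flags.  Does
 * nothing unless CONFIG_TRANSPARENT_HUGEPAGE is enabled.
 */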
void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bool referenced = false;
	struct page *page = damon_get_page(pmd_pfn(*pmd));

	if (!page)
		return;

	if (pmd_young(*pmd)) {
		referenced = true;
		*pmd = pmd_mkold(*pmd);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr, addr + HPAGE_PMD_SIZE))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}

#define DAMON_MAX_SUBSCORE	(100)
#define DAMON_MAX_AGE_IN_LOG	(32)
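
/*
 * Worked example for damon_hot_score() below, using illustrative numbers
 * that are not from the source.  Assume sample_interval = 5000 us and
 * aggr_interval = 100000 us, so max_nr_accesses = 100000 / 5000 = 20.  A
 * region with nr_accesses = 10 gets freq_subscore = 10 * 100 / 20 = 50.  If
 * the region's age is 50 aggregation intervals, age_in_sec = 50 * 100000 /
 * 1000000 = 5, which takes three right-shifts to reach zero, so age_in_log =
 * 3 and age_subscore = (3 + 32) * 100 / 32 / 2 = 54.  With both quota
 * weights set to 1, hotness = (50 + 54) / 2 = 52, which is finally scaled to
 * 52 * DAMOS_MAX_SCORE / 100 = 51, assuming DAMOS_MAX_SCORE is 99.
 */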
int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
			struct damos *s)
{
	unsigned int max_nr_accesses;
	int freq_subscore;
	unsigned int age_in_sec;
	int age_in_log, age_subscore;
	unsigned int freq_weight = s->quota.weight_nr_accesses;
	unsigned int age_weight = s->quota.weight_age;
	int hotness;

	max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
	freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;

	age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
	for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
			age_in_log++, age_in_sec >>= 1)
		;

	/* If frequency is 0, higher age means it's colder */
	if (freq_subscore == 0)
		age_in_log *= -1;

	/*
	 * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, DAMON_MAX_AGE_IN_LOG].
	 * Scale it to be in [0, 100] and set it as age subscore.
	 */
	age_in_log += DAMON_MAX_AGE_IN_LOG;
	age_subscore = age_in_log * DAMON_MAX_SUBSCORE /
		DAMON_MAX_AGE_IN_LOG / 2;

	hotness = (freq_weight * freq_subscore + age_weight * age_subscore);
	if (freq_weight + age_weight)
		hotness /= freq_weight + age_weight;

	/*
	 * Transform it to fit in [0, DAMOS_MAX_SCORE]
	 */
	hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;

	return hotness;
}
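
/*
 * The cold score is the complement of the hot score: regions that score high
 * for hotness-based schemes score low here, and vice versa.
 */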
int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
			struct damos *s)
{
	int hotness = damon_hot_score(c, r, s);

	/* Return coldness of the region */
	return DAMOS_MAX_SCORE - hotness;
}