/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>

#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)
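/*
 * Example (illustrative, not part of the driver): with the 0644 permission on
 * the module parameter above, the debug mask can normally be changed at
 * runtime through sysfs, e.g. something like
 *
 *	echo 12 > /sys/module/binder_alloc/parameters/debug_mask
 *
 * to enable BINDER_DEBUG_BUFFER_ALLOC (1U << 2) and
 * BINDER_DEBUG_BUFFER_ALLOC_ASYNC (1U << 3); the exact sysfs path depends on
 * how this file is built into the binder driver.
 */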
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
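/*
 * Illustration (added comment, not original source): buffer sizes are
 * implicit; a binder_buffer only records where its data starts.  For two
 * adjacent buffers A and B on alloc->buffers, with B the last entry:
 *
 *	size(A) = B->data - A->data
 *	size(B) = alloc->buffer + alloc->buffer_size - B->data
 *
 * which is exactly what binder_alloc_buffer_size() computes above.
 */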
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid, (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}
/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for a buffer that matches the user
 * data pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
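/*
 * Typical use (sketch only; the real call sites live in binder.c and may
 * differ): when handling a BC_FREE_BUFFER command, the driver first
 * translates and claims the buffer, then releases it:
 *
 *	buffer = binder_alloc_prepare_to_free(alloc, data_ptr);
 *	if (buffer == NULL) {
 *		// invalid pointer or attempted double free
 *		return;
 *	}
 *	...
 *	binder_alloc_free_buf(alloc, buffer);
 *
 * The user pointer is translated to a kernel pointer by subtracting
 * alloc->user_buffer_offset, the fixed offset established at mmap time.
 */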
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (!vma && need_mm)
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
			       alloc->pid);
			vma = NULL;
		}
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);

		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr, NULL);
	return ERR_PTR(-ENOMEM);
}
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer, or an ERR_PTR-encoded error code on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
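/*
 * Example caller pattern (sketch only, not taken from binder.c): because
 * errors come back as ERR_PTR values rather than NULL, callers are expected
 * to check with IS_ERR()/PTR_ERR():
 *
 *	struct binder_buffer *buf;
 *
 *	buf = binder_alloc_new_buf(alloc, tr->data_size, tr->offsets_size,
 *				   extra_buffers_size,
 *				   !(tr->flags & TF_ONE_WAY));
 *	if (IS_ERR(buf)) {
 *		int err = PTR_ERR(buf);  // -ESRCH, -EINVAL, -ENOSPC or -ENOMEM
 *		...
 *	}
 *
 * The names tr and extra_buffers_size above are illustrative only.
 */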
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next->data);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE,
					 NULL);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
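/*
 * Note on the mapping set up above (explanatory comment, not original
 * source): the handler reserves a kernel virtual range the same size as the
 * userspace vma and records the constant difference between the two as
 * alloc->user_buffer_offset.  Pages are later inserted into both mappings by
 * binder_update_page_range(), so for any allocated byte:
 *
 *	user_address == kernel_address + alloc->user_buffer_offset
 *
 * which is the translation used by binder_alloc_prepare_to_free_locked().
 */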
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "pages: %d:%d:%d\n", active, lru, free);
}
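/*
 * The line emitted above reads "pages: <active>:<lru>:<free>".  A
 * hypothetical "pages: 4:2:250" would mean 4 pages currently backing live
 * buffers, 2 pages resident but parked on the shrinker LRU, and 250 page
 * slots with no page allocated.  (Example values are illustrative only.)
 */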
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance holding the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	vma = alloc->vma;
	if (vma) {
		mm = get_task_mm(alloc->tsk);
		if (!mm)
			goto err_get_task_mm_failed;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);

		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	trace_binder_unmap_kernel_start(alloc, index);

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_get_task_mm_failed:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}
void binder_alloc_shrinker_init(void)
{
	list_lru_init(&binder_alloc_lru);
	register_shrinker(&binder_shrinker);
}
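/*
 * Initialization order (sketch; the actual call site lives in binder.c and
 * may differ): binder_alloc_shrinker_init() is meant to run once at driver
 * init, before any process can mmap a binder device, roughly:
 *
 *	static int __init binder_init(void)
 *	{
 *		binder_alloc_shrinker_init();
 *		...
 *	}
 *
 * Per-process state, by contrast, is set up by binder_alloc_init() each time
 * binder_open() creates a new binder_proc.
 */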