/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Google, Inc.
*/

#ifndef _LINUX_BINDER_ALLOC_H
#define _LINUX_BINDER_ALLOC_H

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>

extern struct list_lru binder_freelist;

struct binder_transaction;

/**
 * struct binder_buffer - buffer used for binder transactions
 * @entry:              entry alloc->buffers
 * @rb_node:            node for allocated_buffers/free_buffers rb trees
 * @free:               %true if buffer is free
 * @clear_on_free:      %true if buffer must be zeroed after use
 * @allow_user_free:    %true if user is allowed to free buffer
 * @async_transaction:  %true if buffer is in use for an async txn
 * @oneway_spam_suspect: %true if total async allocate size just exceeded
 *                      the spam detection threshold
 * @debug_id:           unique ID for debugging
 * @transaction:        pointer to associated struct binder_transaction
 * @target_node:        struct binder_node associated with this buffer
 * @data_size:          size of @transaction data
 * @offsets_size:       size of array of offsets
 * @extra_buffers_size: size of space for other objects (like sg lists)
 * @user_data:          user pointer to base of buffer space
 * @pid:                pid to attribute the buffer to (caller)
 *
 * Bookkeeping structure for binder transaction buffers
 */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned clear_on_free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned oneway_spam_suspect:1;
	unsigned debug_id:27;

	struct binder_transaction *transaction;
	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	unsigned long user_data;
	int pid;
};
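
/*
 * Illustrative sketch, not part of the binder API: when carving a buffer
 * out of the mmap'd region, binder_alloc.c sizes it as the sum of the
 * three size fields, each aligned to sizeof(void *). The helper name
 * below is hypothetical.
 */
static inline size_t binder_buffer_example_size(struct binder_buffer *buffer)
{
	return ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));
}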

/**
 * struct binder_lru_page - page object used for binder shrinker
 * @page_ptr: pointer to physical page in mmap'd space
 * @lru:      entry in binder_freelist
 * @alloc:    binder_alloc for a proc
 */
struct binder_lru_page {
	struct list_head lru;
	struct page *page_ptr;
	struct binder_alloc *alloc;
};

/**
 * struct binder_alloc - per-binder proc state for binder allocator
 * @lock:               protects binder_alloc fields
 * @vma:                vm_area_struct passed to mmap_handler
 *                      (invariant after mmap)
 * @mm:                 copy of task->mm (invariant after open)
 * @buffer:             base of per-proc address space mapped via mmap
 * @buffers:            list of all buffers for this proc
 * @free_buffers:       rb tree of buffers available for allocation
 *                      sorted by size
 * @allocated_buffers:  rb tree of allocated buffers sorted by address
 * @free_async_space:   VA space available for async buffers. This is
 *                      initialized at mmap time to 1/2 the full VA space
 * @pages:              array of binder_lru_page
 * @buffer_size:        size of address space specified via mmap
 * @pid:                pid for associated binder_proc (invariant after init)
 * @pages_high:         high watermark of offset in @pages
 * @oneway_spam_detected: %true if oneway spam detection fired; cleared
 *                      once the async free space returns to a healthy state
 *
 * Bookkeeping structure for per-proc address space management for binder
 * buffers. It is normally initialized during binder_init() and binder_mmap()
 * calls. The address space is used for both user-visible buffers and for
 * struct binder_buffer objects used to track the user buffers
 */
struct binder_alloc {
	spinlock_t lock;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long buffer;
	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;
	struct binder_lru_page *pages;
	size_t buffer_size;
	int pid;
	size_t pages_high;
	bool oneway_spam_detected;
};
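
/*
 * Illustrative sketch, not the real initializer: binder_alloc_mmap_handler()
 * derives the region from the vma and, as noted for @free_async_space above,
 * reserves half of it for async transactions. The real handler also caps the
 * region size and allocates @pages; the function name here is hypothetical.
 */
static inline void binder_alloc_example_mmap_init(struct binder_alloc *alloc,
						  struct vm_area_struct *vma)
{
	alloc->buffer = vma->vm_start;
	alloc->buffer_size = vma->vm_end - vma->vm_start;
	alloc->free_async_space = alloc->buffer_size / 2;
}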

#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
void binder_selftest_alloc(struct binder_alloc *alloc);
#else
static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif

enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock, void *cb_arg);
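
/*
 * Illustrative sketch: binder_alloc_free_page() is the list_lru walk
 * callback invoked by the binder shrinker. Each @item is the @lru entry
 * embedded in a struct binder_lru_page, so the callback can recover the
 * page object with container_of(), roughly as below (hypothetical helper).
 */
static inline struct binder_lru_page *
binder_example_item_to_page(struct list_head *item)
{
	return container_of(item, struct binder_lru_page, lru);
}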

struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async);
void binder_alloc_init(struct binder_alloc *alloc);
int binder_alloc_shrinker_init(void);
void binder_alloc_shrinker_exit(void);
void binder_alloc_vma_close(struct binder_alloc *alloc);
struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
			     unsigned long user_ptr);
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer);
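
/*
 * Illustrative usage sketch with a hypothetical caller: allocate a buffer
 * for a synchronous transaction, then release it. Error handling assumes
 * the ERR_PTR() convention used by binder_alloc.c and that <linux/err.h>
 * is reachable through the includes above; real callers live in binder.c.
 */
static inline int binder_alloc_example_round_trip(struct binder_alloc *alloc)
{
	struct binder_buffer *buffer;

	buffer = binder_alloc_new_buf(alloc, 128, 0, 0, 0 /* !is_async */);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	binder_alloc_free_buf(alloc, buffer);
	return 0;
}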

int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma);
void binder_alloc_deferred_release(struct binder_alloc *alloc);
int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc);
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc);

/**
 * binder_alloc_get_free_async_space() - get free space available for async
 * @alloc:	binder_alloc for this proc
 *
 * Return:	the bytes remaining in the address-space for async transactions
 */
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
	size_t free_async_space;

	spin_lock(&alloc->lock);
	free_async_space = alloc->free_async_space;
	spin_unlock(&alloc->lock);
	return free_async_space;
}

unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes);
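
/*
 * Illustrative sketch (hypothetical wrapper): unlike the kernel-to-kernel
 * helpers below, binder_alloc_copy_user_to_buffer() follows the
 * copy_from_user() convention and returns the number of bytes left
 * uncopied, so a nonzero result maps to -EFAULT.
 */
static inline int binder_example_copy_from_user(struct binder_alloc *alloc,
						struct binder_buffer *buffer,
						const void __user *from,
						size_t bytes)
{
	if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0, from, bytes))
		return -EFAULT;
	return 0;
}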

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes);

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes);
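
/*
 * Illustrative sketch (hypothetical helper): the copy helpers take a byte
 * offset into @buffer rather than a raw pointer because the backing pages
 * need not be contiguously mapped in kernel space. Both calls return 0 on
 * success or a negative errno.
 */
static inline int binder_example_copy_round_trip(struct binder_alloc *alloc,
						 struct binder_buffer *buffer)
{
	u32 in = 0xdeadbeef, out = 0;
	int ret;

	ret = binder_alloc_copy_to_buffer(alloc, buffer, 0, &in, sizeof(in));
	if (ret)
		return ret;
	return binder_alloc_copy_from_buffer(alloc, &out, buffer, 0,
					     sizeof(out));
}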

#endif /* _LINUX_BINDER_ALLOC_H */