/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Google, Inc.
 */

#ifndef _LINUX_BINDER_ALLOC_H
#define _LINUX_BINDER_ALLOC_H

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>

extern struct list_lru binder_freelist;
struct binder_transaction;

/**
 * struct binder_buffer - buffer used for binder transactions
 * @entry:              entry in &binder_alloc.buffers list
 * @rb_node:            node for allocated_buffers/free_buffers rb trees
 * @free:               %true if buffer is free
 * @clear_on_free:      %true if buffer must be zeroed after use
 * @allow_user_free:    %true if user is allowed to free buffer
 * @async_transaction:  %true if buffer is in use for an async txn
 * @oneway_spam_suspect: %true if total async allocated size just exceeded
 *                      the oneway spam detection threshold
 * @debug_id:           unique ID for debugging
 * @transaction:        pointer to associated struct binder_transaction
 * @target_node:        struct binder_node associated with this buffer
 * @data_size:          size of @transaction data
 * @offsets_size:       size of array of offsets
 * @extra_buffers_size: size of space for other objects (like sg lists)
 * @user_data:          user pointer to base of buffer space
 * @pid:                pid to attribute the buffer to (caller)
 *
 * Bookkeeping structure for binder transaction buffers
 */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned clear_on_free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned oneway_spam_suspect:1;
	unsigned debug_id:27;
	struct binder_transaction *transaction;
	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	unsigned long user_data;
	int pid;
};
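
/*
 * Sizing sketch (illustrative, mirroring the allocator's computation in
 * binder_alloc.c): a buffer's footprint in the mmap'd region covers all
 * three size fields, each padded to pointer alignment:
 *
 *	size_t size = ALIGN(data_size, sizeof(void *)) +
 *		      ALIGN(offsets_size, sizeof(void *)) +
 *		      ALIGN(extra_buffers_size, sizeof(void *));
 *
 * @user_data is the start of that region in the target process's address
 * space; the kernel never dereferences it directly and instead goes
 * through the binder_alloc_copy_*() helpers declared below.
 */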

/**
 * struct binder_lru_page - page object used for binder shrinker
 * @page_ptr: pointer to physical page in mmap'd space
 * @lru:      entry in binder_freelist
 * @alloc:    binder_alloc for a proc
 */
struct binder_lru_page {
	struct list_head lru;
	struct page *page_ptr;
	struct binder_alloc *alloc;
};
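
/*
 * Index sketch (illustrative): entries in &binder_alloc.pages parallel
 * the mmap'd region one page at a time, so a user address maps to its
 * tracking struct by page offset:
 *
 *	size_t index = (addr - alloc->buffer) / PAGE_SIZE;
 *	struct binder_lru_page *lru_page = &alloc->pages[index];
 *
 * Pages whose @lru entry sits on binder_freelist are unused and may be
 * reclaimed by the shrinker via binder_alloc_free_page().
 */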
72
73/**
74 * struct binder_alloc - per-binder proc state for binder allocator
75 * @lock:               protects binder_alloc fields
76 * @vma:                vm_area_struct passed to mmap_handler
77 *                      (invariant after mmap)
78 * @mm:                 copy of task->mm (invariant after open)
79 * @buffer:             base of per-proc address space mapped via mmap
80 * @buffers:            list of all buffers for this proc
81 * @free_buffers:       rb tree of buffers available for allocation
82 *                      sorted by size
83 * @allocated_buffers:  rb tree of allocated buffers sorted by address
84 * @free_async_space:   VA space available for async buffers. This is
85 *                      initialized at mmap time to 1/2 the full VA space
86 * @pages:              array of binder_lru_page
87 * @buffer_size:        size of address space specified via mmap
88 * @pid:                pid for associated binder_proc (invariant after init)
89 * @pages_high:         high watermark of offset in @pages
90 * @oneway_spam_detected: %true if oneway spam detection fired, clear that
91 * flag once the async buffer has returned to a healthy state
92 *
93 * Bookkeeping structure for per-proc address space management for binder
94 * buffers. It is normally initialized during binder_init() and binder_mmap()
95 * calls. The address space is used for both user-visible buffers and for
96 * struct binder_buffer objects used to track the user buffers
97 */
struct binder_alloc {
	spinlock_t lock;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long buffer;
	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;
	struct binder_lru_page *pages;
	size_t buffer_size;
	int pid;
	size_t pages_high;
	bool oneway_spam_detected;
};
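
/*
 * Locking sketch (illustrative): the rb trees and counters above are
 * protected by @lock, so any walk must hold it, e.g.:
 *
 *	struct rb_node *n;
 *
 *	spin_lock(&alloc->lock);
 *	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
 *		struct binder_buffer *buffer =
 *			rb_entry(n, struct binder_buffer, rb_node);
 *		... inspect buffer under the lock ...
 *	}
 *	spin_unlock(&alloc->lock);
 */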

#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
void binder_selftest_alloc(struct binder_alloc *alloc);
#else
static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock, void *cb_arg);
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async);
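
/*
 * Allocation sketch (illustrative; @tr stands for a hypothetical incoming
 * transaction): the buffer is sized from the transaction, and the result
 * is an ERR_PTR() on failure, so callers check it with IS_ERR():
 *
 *	t->buffer = binder_alloc_new_buf(&target_proc->alloc,
 *					 tr->data_size, tr->offsets_size,
 *					 extra_buffers_size,
 *					 !!(tr->flags & TF_ONE_WAY));
 *	if (IS_ERR(t->buffer))
 *		return PTR_ERR(t->buffer);
 */
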
void binder_alloc_init(struct binder_alloc *alloc);
int binder_alloc_shrinker_init(void);
void binder_alloc_shrinker_exit(void);
void binder_alloc_vma_close(struct binder_alloc *alloc);
struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
			     unsigned long user_ptr);
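
/*
 * Free-path sketch (illustrative): when userspace releases a buffer
 * (e.g. via BC_FREE_BUFFER), its user pointer is first translated back
 * to the bookkeeping struct, then freed:
 *
 *	buffer = binder_alloc_prepare_to_free(alloc, user_ptr);
 *	if (IS_ERR_OR_NULL(buffer))
 *		return;	(unknown address or not yet freeable)
 *	binder_alloc_free_buf(alloc, buffer);
 */
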
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer);
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma);
void binder_alloc_deferred_release(struct binder_alloc *alloc);
int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc);
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc);

/**
 * binder_alloc_get_free_async_space() - get free space available for async
 * @alloc:	binder_alloc for this proc
 *
 * Return:	the bytes remaining in the address-space for async transactions
 */
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
	size_t free_async_space;

	spin_lock(&alloc->lock);
	free_async_space = alloc->free_async_space;
	spin_unlock(&alloc->lock);
	return free_async_space;
}
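
/*
 * Usage sketch (illustrative): the helper above takes @alloc->lock
 * itself, so callers can sample the async headroom without any extra
 * locking, e.g. when reporting low space:
 *
 *	if (binder_alloc_get_free_async_space(alloc) < needed)
 *		pr_warn_ratelimited("binder: low async space\n");
 */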

unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes);

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes);

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes);
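
/*
 * Copy sketch (illustrative): transaction payloads live in the target's
 * mmap'd pages, so data moves through these helpers rather than by
 * dereferencing binder_buffer.user_data. The from-user copy returns the
 * number of bytes left uncopied; the kernel-side copies return 0 or a
 * negative errno:
 *
 *	if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0,
 *					     (const void __user *)ptr,
 *					     size))
 *		return -EFAULT;
 *
 *	struct flat_binder_object obj;
 *
 *	if (binder_alloc_copy_from_buffer(alloc, &obj, buffer,
 *					  offset, sizeof(obj)))
 *		return -EINVAL;
 */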

#endif /* _LINUX_BINDER_ALLOC_H */