/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Google, Inc.
 */

#ifndef _LINUX_BINDER_ALLOC_H
#define _LINUX_BINDER_ALLOC_H

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>

extern struct list_lru binder_freelist;
struct binder_transaction;

/**
 * struct binder_buffer - buffer used for binder transactions
 * @entry: entry in the alloc->buffers list
 * @rb_node: node for allocated_buffers/free_buffers rb trees
 * @free: %true if buffer is free
 * @clear_on_free: %true if buffer must be zeroed after use
 * @allow_user_free: %true if user is allowed to free buffer
 * @async_transaction: %true if buffer is in use for an async txn
 * @oneway_spam_suspect: %true if the total async allocation size just
 *                       exceeded the spam detection threshold
 * @debug_id: unique ID for debugging
 * @transaction: pointer to associated struct binder_transaction
 * @target_node: struct binder_node associated with this buffer
 * @data_size: size of @transaction data
 * @offsets_size: size of array of offsets
 * @extra_buffers_size: size of space for other objects (like sg lists)
 * @user_data: user pointer to base of buffer space
 * @pid: pid to attribute the buffer to (caller)
 *
 * Bookkeeping structure for binder transaction buffers
 */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned clear_on_free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned oneway_spam_suspect:1;
	unsigned debug_id:27;
	struct binder_transaction *transaction;
	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	unsigned long user_data;
	int pid;
};
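
/*
 * Illustrative sketch (not part of the driver): per the kerneldoc above,
 * the three size fields describe consecutive, pointer-aligned regions of
 * the buffer, so a buffer's total footprint would be the sum of the
 * aligned sizes. The helper name is hypothetical.
 */
static inline size_t binder_buffer_total_size_sketch(const struct binder_buffer *buffer)
{
	return ALIGN(buffer->data_size, sizeof(void *)) +
	       ALIGN(buffer->offsets_size, sizeof(void *)) +
	       ALIGN(buffer->extra_buffers_size, sizeof(void *));
}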

/**
 * struct binder_shrinker_mdata - binder metadata used to reclaim pages
 * @lru: LRU entry in binder_freelist
 * @alloc: binder_alloc owning the page to reclaim
 * @page_index: index in @alloc->pages[] of the page to reclaim
 */
struct binder_shrinker_mdata {
	struct list_head lru;
	struct binder_alloc *alloc;
	unsigned long page_index;
};

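/*
 * page_to_lru() - look up the freelist entry for a page. This assumes
 * the allocator stashed a struct binder_shrinker_mdata pointer in the
 * page's private field when the page was installed.
 */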
static inline struct list_head *page_to_lru(struct page *p)
{
	struct binder_shrinker_mdata *mdata;

	mdata = (struct binder_shrinker_mdata *)page_private(p);

	return &mdata->lru;
}
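
/*
 * Illustrative counterpart (an assumption, not declared in this header):
 * for page_to_lru() to work, the allocator must attach the metadata when
 * it installs a page, along the lines of:
 *
 *	set_page_private(page, (unsigned long)mdata);
 */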

/**
 * struct binder_alloc - per-binder proc state for binder allocator
 * @mutex: protects binder_alloc fields
 * @mm: copy of task->mm (invariant after open)
 * @vm_start: base of per-proc address space mapped via mmap
 * @buffers: list of all buffers for this proc
 * @free_buffers: rb tree of buffers available for allocation
 *                sorted by size
 * @allocated_buffers: rb tree of allocated buffers sorted by address
 * @free_async_space: VA space available for async buffers. This is
 *                    initialized at mmap time to 1/2 the full VA space
 * @pages: array of struct page *
 * @buffer_size: size of address space specified via mmap
 * @pid: pid for associated binder_proc (invariant after init)
 * @pages_high: high watermark of offset in @pages
 * @mapped: whether the vm area is mapped; each binder instance is
 *          allowed a single mapping throughout its lifetime
 * @oneway_spam_detected: %true if oneway spam detection fired; cleared
 *                        once the async buffer has returned to a healthy state
 *
 * Bookkeeping structure for per-proc address space management for binder
 * buffers. It is normally initialized during binder_init() and binder_mmap()
 * calls. The address space is used for both user-visible buffers and for
 * struct binder_buffer objects used to track the user buffers
 */
struct binder_alloc {
	struct mutex mutex;
	struct mm_struct *mm;
	unsigned long vm_start;
	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;
	struct page **pages;
	size_t buffer_size;
	int pid;
	size_t pages_high;
	bool mapped;
	bool oneway_spam_detected;
};
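
/*
 * Illustrative sketch (an assumption based on the @free_async_space
 * kerneldoc above, not a definitive copy of binder_alloc.c): at mmap
 * time the async pool is capped at half of the mapped address space.
 * The helper name is hypothetical.
 */
static inline void binder_alloc_init_async_space_sketch(struct binder_alloc *alloc)
{
	alloc->free_async_space = alloc->buffer_size / 2;
}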

#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
void binder_selftest_alloc(struct binder_alloc *alloc);
#else
static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       void *cb_arg);
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async);
void binder_alloc_init(struct binder_alloc *alloc);
int binder_alloc_shrinker_init(void);
void binder_alloc_shrinker_exit(void);
void binder_alloc_vma_close(struct binder_alloc *alloc);
struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
			     unsigned long user_ptr);
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer);
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma);
void binder_alloc_deferred_release(struct binder_alloc *alloc);
int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc);
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc);
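
/*
 * Illustrative usage sketch (not part of the driver; binder.c is the
 * authoritative caller): the typical lifecycle of a transaction buffer
 * using the API above. Error paths are abbreviated and the helper name
 * is hypothetical.
 */
static inline int binder_alloc_lifecycle_sketch(struct binder_alloc *alloc,
						const void __user *from,
						size_t bytes)
{
	struct binder_buffer *buffer;

	buffer = binder_alloc_new_buf(alloc, bytes, 0, 0, 0);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0, from, bytes))
		return -EFAULT;
	/* ... the buffer would now be handed off to the target proc ... */
	binder_alloc_free_buf(alloc, buffer);
	return 0;
}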

/**
 * binder_alloc_get_free_async_space() - get free space available for async
 * @alloc: binder_alloc for this proc
 *
 * Return: the bytes remaining in the address space for async transactions
 */
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
	size_t free_async_space;

	mutex_lock(&alloc->mutex);
	free_async_space = alloc->free_async_space;
	mutex_unlock(&alloc->mutex);
	return free_async_space;
}

unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes);

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes);

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes);
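
/*
 * Illustrative sketch (an assumption, not driver code): writing a value
 * into a buffer with the kernel-side copy helper and reading it back.
 * The helper name is hypothetical; both calls are expected to return 0
 * on success.
 */
static inline int binder_alloc_copy_roundtrip_sketch(struct binder_alloc *alloc,
						     struct binder_buffer *buffer)
{
	u32 val = 42, out;
	int ret;

	ret = binder_alloc_copy_to_buffer(alloc, buffer, 0, &val, sizeof(val));
	if (ret)
		return ret;
	return binder_alloc_copy_from_buffer(alloc, &out, buffer, 0, sizeof(out));
}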

#endif /* _LINUX_BINDER_ALLOC_H */