// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

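/*
 * A per-gt cache of temporary GEM objects, bucketed by size. Callers
 * borrow a node instead of allocating a fresh object for every piece of
 * scratch space, and return it on retirement; nodes left idle for about
 * a second are reaped by a background worker.
 */
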
static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
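	 *
	 * e.g. a 3-page request gives fls(3) - 1 = 1, the 2-page bucket,
	 * which therefore holds both 2- and 3-page objects.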
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
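	/*
	 * Lookups walk the bucket lists under rcu_read_lock(), so defer
	 * freeing the node itself until after a grace period.
	 */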
	kfree_rcu(node, rcu);
}

static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				node->free = stale;
				stale = node;
			}
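			/*
			 * pos is now the newest node we kept (or the
			 * list head if everything was claimed); unlink
			 * all of the claimed nodes after it in one go.
			 */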
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}

static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);
	struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);

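	/* Rearm ourselves while any bucket still holds idle buffers */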
	if (pool_free_older_than(pool, HZ))
		queue_delayed_work(gt->i915->unordered_wq, &pool->work,
				   round_jiffies_up_relative(HZ));
}

static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	if (node->pinned) {
		i915_gem_object_unpin_pages(node->obj);

		/* Return this object to the shrinker pool */
		i915_gem_object_make_purgeable(node->obj);
		node->pinned = false;
	}

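	/*
	 * Publish the node for reuse: link it into its bucket under the
	 * lock, then mark it idle with a nonzero age. Both the lookup and
	 * the reaper claim a node by swapping its age back to zero.
	 */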
	GEM_BUG_ON(node->age);
	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	queue_delayed_work(gt->i915->unordered_wq, &pool->work,
			   round_jiffies_up_relative(HZ));
}

void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
{
	assert_object_held(node->obj);

	if (node->pinned)
		return;

	__i915_gem_object_pin_pages(node->obj);
	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);
	node->pinned = true;
}

static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
	    enum i915_map_type type)
{
	struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

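	/*
	 * The node allocation is opportunistic: retry a little, but fail
	 * (quietly, without a warning) rather than trigger the oom-killer,
	 * and simply report the error back to the caller.
	 */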
	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->age = 0;
	node->pool = pool;
	node->pinned = false;
	i915_active_init(&node->active, NULL, pool_retire, 0);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->type = type;
	node->obj = obj;
	return node;
}

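/*
 * A minimal usage sketch (assumptions, not taken from this file: error
 * handling is elided, and real callers typically also mark the node
 * active against their request before releasing it):
 *
 *	node = intel_gt_get_buffer_pool(gt, size, I915_MAP_WC);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	... fill node->obj ...
 *	i915_active_release(&node->active);
 *
 * Dropping the last active reference retires the node via pool_retire(),
 * which returns the buffer to its bucket for reuse.
 */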
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
			 enum i915_map_type type)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

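	/*
	 * Search the bucket locklessly under RCU. A node is claimed by
	 * cmpxchg'ing its nonzero age to zero, which also serialises us
	 * against the reaper in pool_free_older_than().
	 */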
	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		if (node->type != type)
			continue;

		age = READ_ONCE(node->age);
		if (!age)
			continue;

		if (cmpxchg(&node->age, age, 0) == age) {
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

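	/* The search ran off the end of the bucket: create a fresh node */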
	if (&node->link == list) {
		node = node_create(pool, size, type);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

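	/*
	 * The worker rearms itself while buffers remain, so keep reaping
	 * with no grace period (keep == 0) until the pool is drained and
	 * the worker is no longer queued.
	 */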
	do {
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}

void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}