1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * |
3 | * page_pool/helpers.h |
4 | * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com> |
5 | * Copyright (C) 2016 Red Hat, Inc. |
6 | */ |
7 | |
8 | /** |
9 | * DOC: page_pool allocator |
10 | * |
 * The page_pool allocator is optimized for recycling the pages or page
 * fragments used by skb packets and xdp frames.
13 | * |
 * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(),
 * which allocates memory with or without page splitting depending on the
 * requested memory size.
17 | * |
18 | * If the driver knows that it always requires full pages or its allocations are |
19 | * always smaller than half a page, it can use one of the more specific API |
20 | * calls: |
21 | * |
 * 1. page_pool_alloc_pages(): allocate memory without page splitting when the
 * driver knows that the memory it needs is always larger than half of the page
 * allocated from the page pool. There is no cache line dirtying for
 * 'struct page' when a page is recycled back to the page pool.
26 | * |
 * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
 * driver knows that the memory it needs is always smaller than or equal to half
 * of the page allocated from the page pool. Page splitting saves memory and
 * thus avoids TLB/cache misses for data access, but it also has some cost,
 * mainly cache line dirtying/bouncing for 'struct page' and an atomic operation
 * on page->pp_ref_count.
33 | * |
 * The API keeps track of in-flight pages in order to let API users know when
 * it is safe to free a page_pool object. To keep that accounting correct, API
 * users must call page_pool_put_page() or page_pool_free_va() to free each
 * allocated page or va, or attach it to a page_pool-aware object like an skb
 * marked with skb_mark_for_recycle().
39 | * |
 * page_pool_put_page() may be called multiple times on the same page if a page
 * is split into multiple fragments. For the last fragment, it will either
 * recycle the page or, if page->_refcount > 1, release the DMA mapping and the
 * in-flight state accounting.
44 | * |
 * When a page_pool is created with the PP_FLAG_DMA_SYNC_DEV flag,
 * dma_sync_single_range_for_device() is only called for the last fragment, so
 * the last freed fragment is relied upon to do the sync_for_device operation
 * for all fragments of a split page. The API user must set up pool->p.max_len
 * and pool->p.offset correctly and ensure that page_pool_put_page() is called
 * with dma_sync_size being -1 for the fragment API.
51 | */ |
52 | #ifndef _NET_PAGE_POOL_HELPERS_H |
53 | #define _NET_PAGE_POOL_HELPERS_H |
54 | |
55 | #include <linux/dma-mapping.h> |
56 | |
57 | #include <net/page_pool/types.h> |
58 | #include <net/net_debug.h> |
59 | #include <net/netmem.h> |
60 | |
61 | #ifdef CONFIG_PAGE_POOL_STATS |
62 | /* Deprecated driver-facing API, use netlink instead */ |
63 | int page_pool_ethtool_stats_get_count(void); |
64 | u8 *page_pool_ethtool_stats_get_strings(u8 *data); |
65 | u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats); |
66 | |
67 | bool page_pool_get_stats(const struct page_pool *pool, |
68 | struct page_pool_stats *stats); |
69 | #else |
70 | static inline int page_pool_ethtool_stats_get_count(void) |
71 | { |
72 | return 0; |
73 | } |
74 | |
75 | static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data) |
76 | { |
77 | return data; |
78 | } |
79 | |
80 | static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats) |
81 | { |
82 | return data; |
83 | } |
84 | #endif |
85 | |
86 | /** |
87 | * page_pool_dev_alloc_pages() - allocate a page. |
88 | * @pool: pool from which to allocate |
89 | * |
90 | * Get a page from the page allocator or page_pool caches. |
91 | */ |
92 | static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) |
93 | { |
94 | gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
95 | |
96 | return page_pool_alloc_pages(pool, gfp); |
97 | } |
98 | |
99 | /** |
100 | * page_pool_dev_alloc_frag() - allocate a page fragment. |
101 | * @pool: pool from which to allocate |
102 | * @offset: offset to the allocated page |
103 | * @size: requested size |
104 | * |
105 | * Get a page fragment from the page allocator or page_pool caches. |
106 | * |
 * Return: the allocated page fragment, or NULL on failure.
108 | */ |
109 | static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, |
110 | unsigned int *offset, |
111 | unsigned int size) |
112 | { |
113 | gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
114 | |
115 | return page_pool_alloc_frag(pool, offset, size, gfp); |
116 | } |
117 | |
118 | static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool, |
119 | unsigned int *offset, |
120 | unsigned int *size, gfp_t gfp) |
121 | { |
122 | unsigned int max_size = PAGE_SIZE << pool->p.order; |
123 | netmem_ref netmem; |
124 | |
125 | if ((*size << 1) > max_size) { |
126 | *size = max_size; |
127 | *offset = 0; |
128 | return page_pool_alloc_netmems(pool, gfp); |
129 | } |
130 | |
	netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp);
132 | if (unlikely(!netmem)) |
133 | return 0; |
134 | |
	/* There is very likely not enough space for another fragment, so append
	 * the remaining size to the current fragment to avoid the truesize
	 * underestimation problem.
138 | */ |
139 | if (pool->frag_offset + *size > max_size) { |
140 | *size = max_size - *offset; |
141 | pool->frag_offset = max_size; |
142 | } |
143 | |
144 | return netmem; |
145 | } |
146 | |
147 | static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool, |
148 | unsigned int *offset, |
149 | unsigned int *size) |
150 | { |
151 | gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; |
152 | |
153 | return page_pool_alloc_netmem(pool, offset, size, gfp); |
154 | } |
155 | |
156 | static inline struct page *page_pool_alloc(struct page_pool *pool, |
157 | unsigned int *offset, |
158 | unsigned int *size, gfp_t gfp) |
159 | { |
	return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp));
161 | } |
162 | |
163 | /** |
164 | * page_pool_dev_alloc() - allocate a page or a page fragment. |
165 | * @pool: pool from which to allocate |
166 | * @offset: offset to the allocated page |
167 | * @size: in as the requested size, out as the allocated size |
168 | * |
169 | * Get a page or a page fragment from the page allocator or page_pool caches |
 * depending on the requested size, in order to minimize both memory
 * utilization and the performance penalty.
172 | * |
 * Return: the allocated page or page fragment, or NULL on failure.
174 | */ |
175 | static inline struct page *page_pool_dev_alloc(struct page_pool *pool, |
176 | unsigned int *offset, |
177 | unsigned int *size) |
178 | { |
179 | gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
180 | |
181 | return page_pool_alloc(pool, offset, size, gfp); |
182 | } |
183 | |
184 | static inline void *page_pool_alloc_va(struct page_pool *pool, |
185 | unsigned int *size, gfp_t gfp) |
186 | { |
187 | unsigned int offset; |
188 | struct page *page; |
189 | |
190 | /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */ |
	page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
192 | if (unlikely(!page)) |
193 | return NULL; |
194 | |
195 | return page_address(page) + offset; |
196 | } |
197 | |
198 | /** |
199 | * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its |
200 | * va. |
201 | * @pool: pool from which to allocate |
202 | * @size: in as the requested size, out as the allocated size |
203 | * |
204 | * This is just a thin wrapper around the page_pool_alloc() API, and |
 * it returns the va of the allocated page or page fragment.
206 | * |
 * Return: the va of the allocated page or page fragment, or NULL on failure.
208 | */ |
209 | static inline void *page_pool_dev_alloc_va(struct page_pool *pool, |
210 | unsigned int *size) |
211 | { |
212 | gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
213 | |
214 | return page_pool_alloc_va(pool, size, gfp); |
215 | } |
216 | |
217 | /** |
218 | * page_pool_get_dma_dir() - Retrieve the stored DMA direction. |
219 | * @pool: pool from which page was allocated |
220 | * |
 * Get the stored DMA direction. A driver might decide to store this locally
 * and avoid the extra cache line access into the page_pool to determine the
 * direction.
223 | */ |
224 | static inline enum dma_data_direction |
225 | page_pool_get_dma_dir(const struct page_pool *pool) |
226 | { |
227 | return pool->p.dma_dir; |
228 | } |
229 | |
230 | static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr) |
231 | { |
	atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr);
233 | } |
234 | |
235 | /** |
236 | * page_pool_fragment_page() - split a fresh page into fragments |
237 | * @page: page to split |
238 | * @nr: references to set |
239 | * |
240 | * pp_ref_count represents the number of outstanding references to the page, |
241 | * which will be freed using page_pool APIs (rather than page allocator APIs |
242 | * like put_page()). Such references are usually held by page_pool-aware |
243 | * objects like skbs marked for page pool recycling. |
244 | * |
245 | * This helper allows the caller to take (set) multiple references to a |
246 | * freshly allocated page. The page must be freshly allocated (have a |
247 | * pp_ref_count of 1). This is commonly done by drivers and |
248 | * "fragment allocators" to save atomic operations - either when they know |
249 | * upfront how many references they will need; or to take MAX references and |
250 | * return the unused ones with a single atomic dec(), instead of performing |
251 | * multiple atomic inc() operations. |
252 | */ |
253 | static inline void page_pool_fragment_page(struct page *page, long nr) |
254 | { |
	page_pool_fragment_netmem(page_to_netmem(page), nr);
256 | } |
257 | |
258 | static inline long page_pool_unref_netmem(netmem_ref netmem, long nr) |
259 | { |
260 | atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem); |
261 | long ret; |
262 | |
	/* If nr == pp_ref_count then we have cleared all remaining
	 * references to the page:
	 * 1. 'nr == 1': no need to actually overwrite it.
	 * 2. 'nr != 1': overwrite it with one, which is the rare case
	 *               for pp_ref_count draining.
	 *
	 * The main advantage of doing this is that not only do we avoid an
	 * atomic update, as an atomic_read is generally a much cheaper
	 * operation than an atomic update, especially when dealing with a page
	 * that may be referenced by only 2 or 3 users; we also unify the
	 * pp_ref_count handling by ensuring all pages have been partitioned
	 * into only 1 piece initially, and only overwrite it when the page is
	 * partitioned into more than one piece.
	 */
	if (atomic_long_read(pp_ref_count) == nr) {
278 | /* As we have ensured nr is always one for constant case using |
279 | * the BUILD_BUG_ON(), only need to handle the non-constant case |
280 | * here for pp_ref_count draining, which is a rare case. |
281 | */ |
282 | BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1); |
283 | if (!__builtin_constant_p(nr)) |
			atomic_long_set(pp_ref_count, 1);
285 | |
286 | return 0; |
287 | } |
288 | |
	ret = atomic_long_sub_return(nr, pp_ref_count);
290 | WARN_ON(ret < 0); |
291 | |
	/* We are the last user here too; reset pp_ref_count back to 1 to
	 * ensure all pages have been partitioned into 1 piece initially.
	 * This should be the rare case where the last two fragment users call
	 * page_pool_unref_page() concurrently.
296 | */ |
297 | if (unlikely(!ret)) |
		atomic_long_set(pp_ref_count, 1);
299 | |
300 | return ret; |
301 | } |
302 | |
303 | static inline long page_pool_unref_page(struct page *page, long nr) |
304 | { |
	return page_pool_unref_netmem(page_to_netmem(page), nr);
306 | } |
307 | |
308 | static inline void page_pool_ref_netmem(netmem_ref netmem) |
309 | { |
	atomic_long_inc(netmem_get_pp_ref_count_ref(netmem));
311 | } |
312 | |
313 | static inline void page_pool_ref_page(struct page *page) |
314 | { |
	page_pool_ref_netmem(page_to_netmem(page));
316 | } |
317 | |
318 | static inline bool page_pool_unref_and_test(netmem_ref netmem) |
319 | { |
320 | /* If page_pool_unref_page() returns 0, we were the last user */ |
	return page_pool_unref_netmem(netmem, 1) == 0;
322 | } |
323 | |
324 | static inline void page_pool_put_netmem(struct page_pool *pool, |
325 | netmem_ref netmem, |
326 | unsigned int dma_sync_size, |
327 | bool allow_direct) |
328 | { |
329 | /* When page_pool isn't compiled-in, net/core/xdp.c doesn't |
	 * allow registering MEM_TYPE_PAGE_POOL, but shield the linker anyway.
331 | */ |
332 | #ifdef CONFIG_PAGE_POOL |
333 | if (!page_pool_unref_and_test(netmem)) |
334 | return; |
335 | |
336 | page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct); |
337 | #endif |
338 | } |
339 | |
340 | /** |
341 | * page_pool_put_page() - release a reference to a page pool page |
342 | * @pool: pool from which page was allocated |
343 | * @page: page to release a reference on |
344 | * @dma_sync_size: how much of the page may have been touched by the device |
345 | * @allow_direct: released by the consumer, allow lockless caching |
346 | * |
347 | * The outcome of this depends on the page refcnt. If the driver bumps |
348 | * the refcnt > 1 this will unmap the page. If the page refcnt is 1 |
349 | * the allocator owns the page and will try to recycle it in one of the pool |
350 | * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device |
351 | * using dma_sync_single_range_for_device(). |
352 | */ |
353 | static inline void page_pool_put_page(struct page_pool *pool, |
354 | struct page *page, |
355 | unsigned int dma_sync_size, |
356 | bool allow_direct) |
357 | { |
	page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
359 | allow_direct); |
360 | } |
361 | |
362 | static inline void page_pool_put_full_netmem(struct page_pool *pool, |
363 | netmem_ref netmem, |
364 | bool allow_direct) |
365 | { |
	page_pool_put_netmem(pool, netmem, -1, allow_direct);
367 | } |
368 | |
369 | /** |
370 | * page_pool_put_full_page() - release a reference on a page pool page |
371 | * @pool: pool from which page was allocated |
372 | * @page: page to release a reference on |
373 | * @allow_direct: released by the consumer, allow lockless caching |
374 | * |
375 | * Similar to page_pool_put_page(), but will DMA sync the entire memory area |
376 | * as configured in &page_pool_params.max_len. |
377 | */ |
378 | static inline void page_pool_put_full_page(struct page_pool *pool, |
379 | struct page *page, bool allow_direct) |
380 | { |
	page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
382 | } |
383 | |
384 | /** |
385 | * page_pool_recycle_direct() - release a reference on a page pool page |
386 | * @pool: pool from which page was allocated |
387 | * @page: page to release a reference on |
388 | * |
389 | * Similar to page_pool_put_full_page() but caller must guarantee safe context |
 * (e.g. NAPI), since it will recycle the page directly into the pool fast cache.
391 | */ |
392 | static inline void page_pool_recycle_direct(struct page_pool *pool, |
393 | struct page *page) |
394 | { |
	page_pool_put_full_page(pool, page, true);
396 | } |
397 | |
398 | static inline void page_pool_recycle_direct_netmem(struct page_pool *pool, |
399 | netmem_ref netmem) |
400 | { |
	page_pool_put_full_netmem(pool, netmem, true);
402 | } |
403 | |
404 | #define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \ |
405 | (sizeof(dma_addr_t) > sizeof(unsigned long)) |
406 | |
407 | /** |
408 | * page_pool_free_va() - free a va into the page_pool |
409 | * @pool: pool from which va was allocated |
410 | * @va: va to be freed |
411 | * @allow_direct: freed by the consumer, allow lockless caching |
412 | * |
 * Free a va allocated from page_pool_alloc_va().
414 | */ |
415 | static inline void page_pool_free_va(struct page_pool *pool, void *va, |
416 | bool allow_direct) |
417 | { |
	page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
419 | } |
420 | |
421 | static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem) |
422 | { |
423 | dma_addr_t ret = netmem_get_dma_addr(netmem); |
424 | |
425 | if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) |
426 | ret <<= PAGE_SHIFT; |
427 | |
428 | return ret; |
429 | } |
430 | |
431 | /** |
432 | * page_pool_get_dma_addr() - Retrieve the stored DMA address. |
433 | * @page: page allocated from a page pool |
434 | * |
435 | * Fetch the DMA address of the page. The page pool to which the page belongs |
 * must have been created with PP_FLAG_DMA_MAP.
437 | */ |
438 | static inline dma_addr_t page_pool_get_dma_addr(const struct page *page) |
439 | { |
440 | dma_addr_t ret = page->dma_addr; |
441 | |
442 | if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) |
443 | ret <<= PAGE_SHIFT; |
444 | |
445 | return ret; |
446 | } |
447 | |
448 | static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool, |
449 | const dma_addr_t dma_addr, |
450 | u32 offset, u32 dma_sync_size) |
451 | { |
	dma_sync_single_range_for_cpu(pool->p.dev, dma_addr,
				      offset + pool->p.offset, dma_sync_size,
				      page_pool_get_dma_dir(pool));
455 | } |
456 | |
457 | /** |
458 | * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW |
459 | * @pool: &page_pool the @page belongs to |
460 | * @page: page to sync |
461 | * @offset: offset from page start to "hard" start if using PP frags |
462 | * @dma_sync_size: size of the data written to the page |
463 | * |
464 | * Can be used as a shorthand to sync Rx pages before accessing them in the |
465 | * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``. |
466 | * Note that this version performs DMA sync unconditionally, even if the |
467 | * associated PP doesn't perform sync-for-device. |
468 | */ |
469 | static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool, |
470 | const struct page *page, |
471 | u32 offset, u32 dma_sync_size) |
472 | { |
	__page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset,
474 | dma_sync_size); |
475 | } |
476 | |
477 | static inline void |
478 | page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool, |
479 | const netmem_ref netmem, u32 offset, |
480 | u32 dma_sync_size) |
481 | { |
482 | if (!pool->dma_sync_for_cpu) |
483 | return; |
484 | |
485 | __page_pool_dma_sync_for_cpu(pool, |
				     page_pool_get_dma_addr_netmem(netmem),
487 | offset, dma_sync_size); |
488 | } |
489 | |
490 | static inline bool page_pool_put(struct page_pool *pool) |
491 | { |
	return refcount_dec_and_test(&pool->user_cnt);
493 | } |
494 | |
495 | static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid) |
496 | { |
497 | if (unlikely(pool->p.nid != new_nid)) |
498 | page_pool_update_nid(pool, new_nid); |
499 | } |
500 | |
501 | static inline bool page_pool_is_unreadable(struct page_pool *pool) |
502 | { |
503 | return !!pool->mp_ops; |
504 | } |
505 | |
506 | #endif /* _NET_PAGE_POOL_HELPERS_H */ |
507 | |