// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>

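/*
 * Allocate a per-CPU set of work structs, each initialized to run
 * @function and each carrying @ptr as its context pointer. Returns
 * NULL if the percpu allocation fails.
 */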
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}

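/*
 * Initialize a crypt_queue: a ptr_ring of size @len serviced by the
 * per-CPU workers allocated above, each running @function. last_cpu
 * starts at -1, meaning no CPU has yet been picked for round-robin
 * distribution. Returns 0, or a negative errno if the ring or the
 * per-CPU workers cannot be allocated.
 */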
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	queue->last_cpu = -1;
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}

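/*
 * Tear down a crypt_queue. The per-CPU workers are freed first, and
 * the ring is then cleaned up, dropping any remaining skbs when @purge
 * is true. When not purging, the ring is expected to already be empty,
 * which the WARN_ON checks.
 */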
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
	free_percpu(queue->worker);
	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}

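/*
 * The prev_queue is a bounded multi-producer, single-consumer queue in
 * the style of Dmitry Vyukov's intrusive MPSC node queue. Rather than
 * allocating separate nodes, it links sk_buffs through their ->prev
 * pointer (aliased as NEXT below), and uses the queue's embedded
 * `empty` member as a permanent stub node (STUB below), so the queue
 * always contains at least one node.
 */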
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

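/*
 * Point both head and tail at the stub node and zero the count. The
 * BUILD_BUG_ON verifies that the `empty` stub member lays out its
 * next/prev fields at the same offsets as a real sk_buff, since the
 * NEXT and STUB macros rely on treating it as one.
 */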
void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
						  offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
						  offsetof(struct prev_queue, empty));
}

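/*
 * Publish @skb as the new head: clear its next pointer, swap it into
 * head with release semantics, then link the old head to it. In the
 * window between the xchg and the final write, the chain from tail to
 * the new head is momentarily broken; the dequeue path detects this by
 * seeing a NULL next while tail != head, and returns NULL until the
 * producer finishes.
 */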
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

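/*
 * Bounded enqueue: atomically take a slot only if fewer than
 * MAX_QUEUED_PACKETS packets are queued, then push @skb. Returns false
 * if the queue is full, leaving the skb to the caller.
 */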
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}

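/*
 * Single-consumer dequeue. The stub node is skipped over when it is
 * found at the tail. Because the last real skb's next pointer is NULL,
 * it can only be detached safely once the stub has been re-enqueued
 * behind it; otherwise a concurrent producer could still be linking
 * onto it. Returns NULL when the queue is empty or when a producer is
 * mid-enqueue.
 */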
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	if (tail != READ_ONCE(queue->head))
		return NULL;
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}

#undef NEXT
#undef STUB