1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _BPF_CGROUP_H |
3 | #define _BPF_CGROUP_H |
4 | |
5 | #include <linux/bpf.h> |
6 | #include <linux/bpf-cgroup-defs.h> |
7 | #include <linux/errno.h> |
8 | #include <linux/jump_label.h> |
9 | #include <linux/percpu.h> |
10 | #include <linux/rbtree.h> |
11 | #include <net/sock.h> |
12 | #include <uapi/linux/bpf.h> |
13 | |
struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;	/* fixed: was the invalid "struct ;" — tag
				 * required by __cgroup_bpf_run_filter_sysctl()
				 * prototype below */
struct task_struct;
25 | |
/*
 * cgroup LSM program runners. Each takes the opaque program context and the
 * first instruction of the program to execute. Declared outside the
 * CONFIG_CGROUP_BPF guard, so the symbols are visible unconditionally.
 */
unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn);
32 | |
33 | #ifdef CONFIG_CGROUP_BPF |
34 | |
/* Expands to one switch case mapping the uapi value BPF_<type> to the
 * kernel-internal cgroup attach type <type>. Local helper; #undef'd below. */
#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

/*
 * Translate a uapi enum bpf_attach_type into the dense
 * enum cgroup_bpf_attach_type used to index the per-cgroup program arrays.
 * Returns CGROUP_BPF_ATTACH_TYPE_INVALID for any non-cgroup attach type.
 */
static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

#undef CGROUP_ATYPE
71 | |
/* One static key per cgroup attach type; cgroup_bpf_enabled() compiles the
 * guard in the BPF_CGROUP_RUN_* macros down to a static branch, so the
 * wrappers cost nearly nothing while no program of that type is in use. */
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

/* Iterate @stype over every BPF cgroup storage type. */
#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
77 | |
struct bpf_cgroup_storage_map;

/* RCU-freed buffer holding the shared (non-per-CPU) storage payload. */
struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];		/* flexible array: the actual storage bytes */
};

/* One cgroup-storage instance. Exactly one union member is meaningful,
 * selected by the owning map's type (see cgroup_storage_type()). */
struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;	/* BPF_CGROUP_STORAGE_SHARED */
		void __percpu *percpu_buf;	/* BPF_CGROUP_STORAGE_PERCPU */
	};
	struct bpf_cgroup_storage_map *map;	/* owning storage map */
	struct bpf_cgroup_storage_key key;
	/* NOTE(review): names suggest list_map links into the map and
	 * list_cg into the cgroup — confirm against kernel/bpf/local_storage.c */
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;			/* deferred free */
};

/* bpf_link-based attachment of one program to one cgroup. */
struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

/* Node in a cgroup's per-attach-type program list, carrying the program,
 * its optional owning link, and its storage for each storage type. */
struct bpf_prog_list {
	struct hlist_node node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
110 | |
/* Cgroup lifecycle hooks for the BPF machinery. */
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

/* Run the @atype programs effective for @sk's cgroup on @skb. */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

/* Run the @atype programs with the socket itself as context. */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

/* Run the @atype sockaddr programs; @flags (may be NULL) returns
 * BPF_RET_* bits from the upper bits of the program's return code. */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

/* Run sock_ops programs for @sk. */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

/* Device cgroup access check (mknod/open-style permission). */
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype);

/* Sysctl read/write filter; may substitute @buf and adjust @pcount/@ppos. */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

/* setsockopt filter; may rewrite level/optname/optlen and hand back a
 * kernel-allocated option value via @kernel_optval. */
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
/* getsockopt filter over a user-space option buffer. */
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

/* getsockopt filter variant taking a kernel buffer. */
int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);
150 | |
151 | static inline enum bpf_cgroup_storage_type cgroup_storage_type( |
152 | struct bpf_map *map) |
153 | { |
154 | if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) |
155 | return BPF_CGROUP_STORAGE_PERCPU; |
156 | |
157 | return BPF_CGROUP_STORAGE_SHARED; |
158 | } |
159 | |
/* Look up the storage instance for @key; @locked tells whether the
 * caller already holds the map's lock. */
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
/* Allocate storage of flavor @stype for @prog; freed with
 * bpf_cgroup_storage_free(). */
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
						    enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
/* Link/unlink a storage instance to/from a cgroup + attach type. */
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

/* Per-CPU storage copy-out / update used by the map syscall paths. */
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);
175 | |
/* Opportunistic check to see whether we have any BPF program attached. */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
					   enum cgroup_bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_prog_array *array;

	/* The effective array equals the global empty-array sentinel when
	 * nothing is attached; any other value means at least one program.
	 * rcu_access_pointer() suffices: the pointer is only compared,
	 * never dereferenced. */
	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	return array != &bpf_empty_prog_array.hdr;
}
186 | |
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */

/* Run ingress programs on @skb; evaluates to 0 when the static key is off
 * or @sk's cgroup has no effective ingress programs. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) &&			      \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS))		      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    CGROUP_INET_INGRESS);     \
									      \
	__ret;								      \
})
198 | |
/* Egress variant: only runs when @sk is the skb's own socket, and resolves
 * request/timewait sockets to the parent full socket first — programs run
 * only against a full socket. */
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) {   \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk) &&				       \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS))	       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      CGROUP_INET_EGRESS);     \
	}								       \
	__ret;								       \
})
211 | |
/* Generic socket-context runner: run the @atype programs for @sk, or
 * evaluate to 0 when the attach type's static key is off. */
#define BPF_CGROUP_RUN_SK_PROG(sk, atype)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype);		       \
	}								       \
	__ret;								       \
})

/* Per-hook conveniences over BPF_CGROUP_RUN_SK_PROG. */
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
232 | |
/* Run @atype sockaddr programs on (@sk, @uaddr) with no extra context and
 * no flag return. Caller must already hold any locking it needs. */
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))					       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,    \
							  NULL, NULL);	       \
	__ret;								       \
})

/* As above, but takes the socket lock around the program run and passes
 * @t_ctx (hook-specific context) through to the programs. */
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))	{				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,    \
							  t_ctx, NULL);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})
253 | |
/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via upper bits of return code. The only flag that is supported
 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags)       \
({									       \
	u32 __flags = 0;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))	{				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,    \
							  NULL, &__flags);     \
		release_sock(sk);					       \
		/* translate the BPF-returned flag into the caller's	       \
		 * bind_flags word */					       \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	       \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	       \
	}								       \
	__ret;								       \
})

/* True when a connect-time program might run for @sk and the protocol
 * provides a pre_connect hook. */
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				       \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||			       \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&			       \
	 (sk)->sk_prot->pre_connect)

/* Per-hook conveniences; the _LOCK variants take the socket lock. */
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
302 | |
/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * set its sock_ops->sk to req_sk, and call this SOCK_OPS"_SK" with
 * the listener-sk such that the cgroup-bpf-progs of the
 * listener-sk will be run.
 *
 * Regardless of syncookie mode or not,
 * calling bpf_setsockopt on listener-sk will not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))			       \
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		       \
							 sock_ops,	       \
							 CGROUP_SOCK_OPS);     \
	__ret;								       \
})

/* Run SOCK_OPS programs against the full socket backing (sock_ops)->sk.
 * NOTE(review): "typeof(sk)" names a variable "sk" that is NOT a parameter
 * of this macro — it relies on every expansion site having an "sk" in
 * scope; confirm callers before reusing this macro elsewhere. */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {	       \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 CGROUP_SOCK_OPS);     \
	}								       \
	__ret;								       \
})
340 | |
/* Device cgroup permission check: @atype here is the device type
 * (char/block), not a BPF attach type. */
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_DEVICE))				      \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access,	      \
							  CGROUP_DEVICE);     \
									      \
	__ret;								      \
})


/* Run sysctl filter programs for the current task's cgroup. */
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL))				       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos,	       \
						       CGROUP_SYSCTL);	       \
	__ret;								       \
})
362 | |
/* setsockopt hook: only runs when the static key is on AND the socket's
 * cgroup actually has SETSOCKOPT programs attached. */
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT))		       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

/* Read the user-supplied optlen only when getsockopt programs may run;
 * evaluates to 0 otherwise. NOTE(review): on get_user() failure __ret is
 * left as whatever get_user wrote — confirm callers treat that as "no max". */
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})

/* getsockopt hook over a user buffer. The protocol may opt out per
 * (level, optname) via sk_prot->bpf_bypass_getsockopt (e.g. TCP zerocopy);
 * in that case @retval is passed through untouched. */
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT))		       \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					tcp_bpf_bypass_getsockopt,	       \
					level, optname))		       \
			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
				sock, level, optname, optval, optlen,	       \
				max_optlen, retval);			       \
	__ret;								       \
})

/* getsockopt hook over a kernel buffer (no bypass check, no sock filter). */
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
					    optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
			sock, level, optname, optval, optlen, retval);	       \
	__ret;								       \
})
409 | |
/* bpf(2) syscall backends for attaching/detaching/querying cgroup programs
 * and for creating cgroup bpf_links. */
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
417 | #else |
418 | |
/* CONFIG_CGROUP_BPF=n: inert stubs. Lifecycle hooks succeed as no-ops;
 * syscall-facing entry points reject with -EINVAL; storage helpers
 * report success with no storage. */
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value, u64 flags) {
	return 0;
}
461 | |
/* CONFIG_CGROUP_BPF=n macro stubs: each run-prog wrapper evaluates to 0
 * ("allowed"), except the getsockopt wrappers which pass @retval through. */
#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })

/* Degenerate loop: body never executes when cgroup BPF is compiled out. */
#define for_each_cgroup_storage_type(stype) for (; false; )
493 | |
494 | #endif /* CONFIG_CGROUP_BPF */ |
495 | |
496 | #endif /* _BPF_CGROUP_H */ |
497 | |