1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // Copyright (c) 2018 Facebook |
3 | |
4 | #include <stdio.h> |
5 | #include <stdlib.h> |
6 | #include <string.h> |
7 | #include <errno.h> |
8 | #include <fcntl.h> |
9 | #include <syscall.h> |
10 | #include <unistd.h> |
11 | #include <linux/perf_event.h> |
12 | #include <sys/ioctl.h> |
13 | #include <sys/time.h> |
14 | #include <sys/types.h> |
15 | #include <sys/stat.h> |
16 | |
17 | #include <linux/bpf.h> |
18 | #include <bpf/bpf.h> |
19 | #include <bpf/libbpf.h> |
20 | |
21 | #include "cgroup_helpers.h" |
22 | #include "testing_helpers.h" |
23 | |
/* Evaluate @condition once; print a PASS line on success or a FAIL line
 * (followed by the caller-supplied message) on failure.  Expands to the
 * truth value of @condition so callers can branch on the result.
 */
#define CHECK(condition, tag, format...) ({ \
	int __ret = !!(condition); \
	if (!__ret) { \
		printf("%s:PASS:%s\n", __func__, tag); \
	} else { \
		printf("%s:FAIL:%s ", __func__, tag); \
		printf(format); \
	} \
	__ret; \
})
34 | |
/* Look up the map named @name inside @obj and return its fd,
 * or -1 if no such map exists.  @test is unused (kept for the
 * historical call signature).
 */
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *m = bpf_object__find_map_by_name(obj, name);

	return m ? bpf_map__fd(m) : -1;
}
45 | |
46 | #define TEST_CGROUP "/test-bpf-get-cgroup-id/" |
47 | |
48 | int main(int argc, char **argv) |
49 | { |
50 | const char *probe_name = "syscalls/sys_enter_nanosleep" ; |
51 | const char *file = "get_cgroup_id_kern.bpf.o" ; |
52 | int err, bytes, efd, prog_fd, pmu_fd; |
53 | int cgroup_fd, cgidmap_fd, pidmap_fd; |
54 | struct perf_event_attr attr = {}; |
55 | struct bpf_object *obj; |
56 | __u64 kcgid = 0, ucgid; |
57 | __u32 key = 0, pid; |
58 | int exit_code = 1; |
59 | char buf[256]; |
60 | const struct timespec req = { |
61 | .tv_sec = 1, |
62 | .tv_nsec = 0, |
63 | }; |
64 | |
65 | cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); |
66 | if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join" , "err %d errno %d\n" , cgroup_fd, errno)) |
67 | return 1; |
68 | |
69 | /* Use libbpf 1.0 API mode */ |
70 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); |
71 | |
72 | err = bpf_prog_test_load(file, type: BPF_PROG_TYPE_TRACEPOINT, pobj: &obj, prog_fd: &prog_fd); |
73 | if (CHECK(err, "bpf_prog_test_load" , "err %d errno %d\n" , err, errno)) |
74 | goto cleanup_cgroup_env; |
75 | |
76 | cgidmap_fd = bpf_find_map(test: __func__, obj, name: "cg_ids" ); |
77 | if (CHECK(cgidmap_fd < 0, "bpf_find_map" , "err %d errno %d\n" , |
78 | cgidmap_fd, errno)) |
79 | goto close_prog; |
80 | |
81 | pidmap_fd = bpf_find_map(test: __func__, obj, name: "pidmap" ); |
82 | if (CHECK(pidmap_fd < 0, "bpf_find_map" , "err %d errno %d\n" , |
83 | pidmap_fd, errno)) |
84 | goto close_prog; |
85 | |
86 | pid = getpid(); |
87 | bpf_map_update_elem(pidmap_fd, &key, &pid, 0); |
88 | |
89 | if (access("/sys/kernel/tracing/trace" , F_OK) == 0) { |
90 | snprintf(buf, size: sizeof(buf), |
91 | fmt: "/sys/kernel/tracing/events/%s/id" , probe_name); |
92 | } else { |
93 | snprintf(buf, size: sizeof(buf), |
94 | fmt: "/sys/kernel/debug/tracing/events/%s/id" , probe_name); |
95 | } |
96 | efd = open(buf, O_RDONLY, 0); |
97 | if (CHECK(efd < 0, "open" , "err %d errno %d\n" , efd, errno)) |
98 | goto close_prog; |
99 | bytes = read(efd, buf, sizeof(buf)); |
100 | close(efd); |
101 | if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read" , |
102 | "bytes %d errno %d\n" , bytes, errno)) |
103 | goto close_prog; |
104 | |
105 | attr.config = strtol(buf, NULL, 0); |
106 | attr.type = PERF_TYPE_TRACEPOINT; |
107 | attr.sample_type = PERF_SAMPLE_RAW; |
108 | attr.sample_period = 1; |
109 | attr.wakeup_events = 1; |
110 | |
111 | /* attach to this pid so the all bpf invocations will be in the |
112 | * cgroup associated with this pid. |
113 | */ |
114 | pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0); |
115 | if (CHECK(pmu_fd < 0, "perf_event_open" , "err %d errno %d\n" , pmu_fd, |
116 | errno)) |
117 | goto close_prog; |
118 | |
119 | err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); |
120 | if (CHECK(err, "perf_event_ioc_enable" , "err %d errno %d\n" , err, |
121 | errno)) |
122 | goto close_pmu; |
123 | |
124 | err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); |
125 | if (CHECK(err, "perf_event_ioc_set_bpf" , "err %d errno %d\n" , err, |
126 | errno)) |
127 | goto close_pmu; |
128 | |
129 | /* trigger some syscalls */ |
130 | syscall(__NR_nanosleep, &req, NULL); |
131 | |
132 | err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid); |
133 | if (CHECK(err, "bpf_map_lookup_elem" , "err %d errno %d\n" , err, errno)) |
134 | goto close_pmu; |
135 | |
136 | ucgid = get_cgroup_id(TEST_CGROUP); |
137 | if (CHECK(kcgid != ucgid, "compare_cgroup_id" , |
138 | "kern cgid %llx user cgid %llx" , kcgid, ucgid)) |
139 | goto close_pmu; |
140 | |
141 | exit_code = 0; |
142 | printf("%s:PASS\n" , argv[0]); |
143 | |
144 | close_pmu: |
145 | close(pmu_fd); |
146 | close_prog: |
147 | bpf_object__close(obj); |
148 | cleanup_cgroup_env: |
149 | cleanup_cgroup_environment(); |
150 | return exit_code; |
151 | } |
152 | |